github.com/intfoundation/intchain@v0.0.0-20220727031208-4316ad31ca73/intprotocol/handler.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package intprotocol

import (
	"encoding/json"
	"errors"
	"fmt"
	"math"
	"math/big"
	"sync"
	"sync/atomic"
	"time"

	"github.com/intfoundation/intchain/common"
	"github.com/intfoundation/intchain/consensus"
	"github.com/intfoundation/intchain/core"
	"github.com/intfoundation/intchain/core/rawdb"
	"github.com/intfoundation/intchain/core/types"
	"github.com/intfoundation/intchain/crypto"
	"github.com/intfoundation/intchain/event"
	"github.com/intfoundation/intchain/intdb"
	"github.com/intfoundation/intchain/intprotocol/downloader"
	"github.com/intfoundation/intchain/intprotocol/fetcher"
	"github.com/intfoundation/intchain/log"
	"github.com/intfoundation/intchain/p2p"
	"github.com/intfoundation/intchain/p2p/discover"
	"github.com/intfoundation/intchain/params"
	"github.com/intfoundation/intchain/rlp"
)

const (
	softResponseLimit = 2 * 1024 * 1024 // Target maximum size of returned blocks, headers or node data.
	estHeaderRlpSize  = 500             // Approximate size of an RLP encoded block header

	// txChanSize is the size of the channel listening to TxPreEvent.
	// The number is referenced from the size of the tx pool.
	txChanSize = 4096

	// tx3PrfDtChainSize is the size of the channel listening to Tx3ProofDataEvent.
	// The number is referenced from the size of the tx pool.
	tx3PrfDtChainSize = 4096
)
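
// Note: with estHeaderRlpSize at 500 bytes, softResponseLimit alone would
// admit roughly 4,000 headers per reply (2 MiB / 500 B ≈ 4194); in practice
// the downloader's per-request caps such as MaxHeaderFetch are typically the
// tighter bound.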

var (
	daoChallengeTimeout = 15 * time.Second // Time allowance for a node to reply to the DAO handshake challenge
)

// errIncompatibleConfig is returned if the requested protocols and configs are
// not compatible (low protocol version restrictions and high requirements).
var errIncompatibleConfig = errors.New("incompatible configuration")

func errResp(code errCode, format string, v ...interface{}) error {
	return fmt.Errorf("%v - %v", code, fmt.Sprintf(format, v...))
}

type ProtocolManager struct {
	networkId uint64

	fastSync  uint32 // Flag whether fast sync is enabled (gets disabled if we already have blocks)
	acceptTxs uint32 // Flag whether we're considered synchronised (enables transaction processing)

	txpool      txPool
	blockchain  *core.BlockChain
	chainconfig *params.ChainConfig
	maxPeers    int

	downloader *downloader.Downloader
	fetcher    *fetcher.Fetcher
	peers      *peerSet

	SubProtocols []p2p.Protocol

	eventMux *event.TypeMux
	txCh     chan core.TxPreEvent
	txSub    event.Subscription

	tx3PrfDtCh    chan core.Tx3ProofDataEvent
	tx3PrfDtFeed  event.Feed
	tx3PrfDtScope event.SubscriptionScope
	tx3PrfDtSub   event.Subscription

	minedBlockSub *event.TypeMuxSubscription

	// channels for fetcher, syncer, txsyncLoop
	newPeerCh   chan *peer
	txsyncCh    chan *txsync
	quitSync    chan struct{}
	noMorePeers chan struct{}

	// wait group is used for graceful shutdowns during downloading
	// and processing
	wg sync.WaitGroup

	engine consensus.Engine

	cch core.CrossChainHelper

	logger         log.Logger
	preimageLogger log.Logger
}

// NewProtocolManager returns a new INT Chain sub-protocol manager. The INT Chain sub-protocol manages peers capable
// of communicating with the INT Chain network.
func NewProtocolManager(config *params.ChainConfig, mode downloader.SyncMode, networkId uint64, mux *event.TypeMux, txpool txPool, engine consensus.Engine, blockchain *core.BlockChain, chaindb intdb.Database, cch core.CrossChainHelper) (*ProtocolManager, error) {
	// Create the protocol manager with the base fields
	manager := &ProtocolManager{
		networkId:      networkId,
		eventMux:       mux,
		txpool:         txpool,
		blockchain:     blockchain,
		chainconfig:    config,
		peers:          newPeerSet(),
		newPeerCh:      make(chan *peer),
		noMorePeers:    make(chan struct{}),
		txsyncCh:       make(chan *txsync),
		quitSync:       make(chan struct{}),
		engine:         engine,
		cch:            cch,
		logger:         config.ChainLogger,
		preimageLogger: config.ChainLogger.New("module", "preimages"),
	}

	if handler, ok := manager.engine.(consensus.Handler); ok {
		handler.SetBroadcaster(manager)
	}
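
	// The back-reference handed to the engine above lets consensus.Handler
	// implementations broadcast messages and look up peers through this
	// protocol manager (see BroadcastMessage and FindPeers below).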

	// Figure out whether to allow fast sync or not
	if mode == downloader.FastSync && blockchain.CurrentBlock().NumberU64() > 0 {
		manager.logger.Warn("Blockchain not empty, fast sync disabled")
		mode = downloader.FullSync
	}
	if mode == downloader.FastSync {
		manager.fastSync = uint32(1)
	}
	protocol := engine.Protocol()
	// Initiate a sub-protocol for every implemented version we can handle
	manager.SubProtocols = make([]p2p.Protocol, 0, len(protocol.Versions))
	for i, version := range protocol.Versions {
		// Skip protocol version if incompatible with the mode of operation
		if mode == downloader.FastSync && version < consensus.Eth63 {
			continue
		}
		// Compatible; initialise the sub-protocol
		version := version // Closure for the run
		manager.SubProtocols = append(manager.SubProtocols, p2p.Protocol{
			Name:    protocol.Name,
			Version: version,
			Length:  protocol.Lengths[i],
			Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
				peer := manager.newPeer(int(version), p, rw)
				select {
				case manager.newPeerCh <- peer:
					manager.wg.Add(1)
					defer manager.wg.Done()
					return manager.handle(peer)
				case <-manager.quitSync:
					return p2p.DiscQuitting
				}
			},
			NodeInfo: func() interface{} {
				return manager.NodeInfo()
			},
			PeerInfo: func(id discover.NodeID) interface{} {
				if p := manager.peers.Peer(fmt.Sprintf("%x", id[:8])); p != nil {
					return p.Info()
				}
				return nil
			},
		})
	}
	if len(manager.SubProtocols) == 0 {
		return nil, errIncompatibleConfig
	}
	// Construct the different synchronisation mechanisms
	manager.downloader = downloader.New(mode, chaindb, manager.eventMux, blockchain, nil, manager.removePeer, manager.logger)

	validator := func(header *types.Header) error {
		return engine.VerifyHeader(blockchain, header, true)
	}
	heighter := func() uint64 {
		return blockchain.CurrentBlock().NumberU64()
	}
	inserter := func(blocks types.Blocks) (int, error) {
		// If fast sync is running, deny importing weird blocks
		if atomic.LoadUint32(&manager.fastSync) == 1 {
			manager.logger.Warn("Discarded bad propagated block", "number", blocks[0].Number(), "hash", blocks[0].Hash())
			return 0, nil
		}
		atomic.StoreUint32(&manager.acceptTxs, 1) // Mark initial sync done on any fetcher import
		return manager.blockchain.InsertChain(blocks)
	}
	manager.fetcher = fetcher.New(blockchain.GetBlockByHash, validator, manager.BroadcastBlock, heighter, inserter, manager.removePeer)

	return manager, nil
}
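
// A minimal usage sketch (illustrative only; chainConfig, txPool, engine,
// blockchain, chainDb and cch stand for an initialised chain configuration,
// transaction pool, consensus engine, blockchain, database and cross-chain
// helper):
//
//	pm, err := NewProtocolManager(chainConfig, downloader.FullSync, networkId,
//		new(event.TypeMux), txPool, engine, blockchain, chainDb, cch)
//	if err != nil {
//		// handle errIncompatibleConfig
//	}
//	pm.Start(25) // accept up to 25 peers
//	defer pm.Stop()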

func (pm *ProtocolManager) removePeer(id string) {
	// Short circuit if the peer was already removed
	peer := pm.peers.Peer(id)
	if peer == nil {
		return
	}
	pm.logger.Debug("Removing INT Chain peer", "peer", id)

	// Unregister the peer from the downloader and INT Chain peer set
	pm.downloader.UnregisterPeer(id)
	if err := pm.peers.Unregister(id); err != nil {
		pm.logger.Error("Peer removal failed", "peer", id, "err", err)
	}
	// Hard disconnect at the networking layer (peer is known to be non-nil here)
	peer.Peer.Disconnect(p2p.DiscUselessPeer)
}

func (pm *ProtocolManager) Start(maxPeers int) {
	pm.maxPeers = maxPeers

	// broadcast transactions
	pm.txCh = make(chan core.TxPreEvent, txChanSize)
	pm.txSub = pm.txpool.SubscribeTxPreEvent(pm.txCh)
	go pm.txBroadcastLoop()

	pm.tx3PrfDtCh = make(chan core.Tx3ProofDataEvent, tx3PrfDtChainSize)
	pm.tx3PrfDtSub = pm.tx3PrfDtScope.Track(pm.tx3PrfDtFeed.Subscribe(pm.tx3PrfDtCh))
	go pm.tx3PrfDtBroadcastLoop()

	// broadcast mined blocks
	pm.minedBlockSub = pm.eventMux.Subscribe(core.NewMinedBlockEvent{})
	go pm.minedBroadcastLoop()

	// start sync handlers
	go pm.syncer()
	go pm.txsyncLoop()
}

func (pm *ProtocolManager) Stop() {
	pm.logger.Info("Stopping INT Chain protocol")

	pm.txSub.Unsubscribe()         // quits txBroadcastLoop
	pm.tx3PrfDtSub.Unsubscribe()   // quits tx3PrfDtBroadcastLoop
	pm.minedBlockSub.Unsubscribe() // quits minedBroadcastLoop

	// Quit the sync loop.
	// After this send has completed, no new peers will be accepted.
	pm.noMorePeers <- struct{}{}

	// Quit fetcher, txsyncLoop.
	close(pm.quitSync)

	// Disconnect existing sessions.
	// This also closes the gate for any new registrations on the peer set.
	// Sessions which are already established but not yet added to pm.peers
	// will exit when they try to register.
	pm.peers.Close()

	// Wait for all peer handler goroutines and the loops to come down.
	pm.wg.Wait()

	pm.logger.Info("INT Chain protocol stopped")
}

func (pm *ProtocolManager) newPeer(pv int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
	return newPeer(pv, p, newMeteredMsgWriter(rw))
}

// handle is the callback invoked to manage the life cycle of an intprotocol peer. When
// this function terminates, the peer is disconnected.
func (pm *ProtocolManager) handle(p *peer) error {
	// Ignore maxPeers if this is a trusted peer
	if pm.peers.Len() >= pm.maxPeers && !p.Peer.Info().Network.Trusted {
		return p2p.DiscTooManyPeers
	}
	p.Log().Debug("INT Chain peer connected", "name", p.Name())

	// Execute the INT Chain handshake
	var (
		genesis = pm.blockchain.Genesis()
		head    = pm.blockchain.CurrentHeader()
		hash    = head.Hash()
		number  = head.Number.Uint64()
		td      = pm.blockchain.GetTd(hash, number)
	)
	if err := p.Handshake(pm.networkId, td, hash, genesis.Hash()); err != nil {
		p.Log().Debug("INT Chain handshake failed", "err", err)
		return err
	}
	if rw, ok := p.rw.(*meteredMsgReadWriter); ok {
		rw.Init(p.version)
	}
	// Register the peer locally
	if err := pm.peers.Register(p); err != nil {
		p.Log().Error("INT Chain peer registration failed", "err", err)
		return err
	}

	defer func() {
		pm.removePeer(p.id)
		if handler, ok := pm.engine.(consensus.Handler); ok {
			handler.RemovePeer(p)
		}
	}()

	// Register the peer with the downloader. If the downloader considers it banned, we disconnect
	if err := pm.downloader.RegisterPeer(p.id, p.version, p); err != nil {
		return err
	}
	// Propagate existing transactions. New transactions appearing
	// after this will be sent via broadcasts.
	pm.syncTransactions(p)

	// Add the peer to the consensus engine
	if handler, ok := pm.engine.(consensus.Handler); ok {
		handler.AddPeer(p)
	} else {
		p.Log().Info("AddPeer not executed")
	}

	// Main loop. Handle incoming messages.
	for {
		if err := pm.handleMsg(p); err != nil {
			p.Log().Debug("INT Chain message handling failed", "err", err)
			return err
		}
	}
}

// handleMsg is invoked whenever an inbound message is received from a remote
// peer. The remote connection is torn down upon returning any error.
func (pm *ProtocolManager) handleMsg(p *peer) error {
	// Read the next message from the remote peer, and ensure it's fully consumed
	msg, err := p.rw.ReadMsg()
	if err != nil {
		return err
	}
	if msg.Size > ProtocolMaxMsgSize {
		return errResp(ErrMsgTooLarge, "%v > %v", msg.Size, ProtocolMaxMsgSize)
	}
	defer msg.Discard()

	// Handle the message depending on its contents
	switch {
	// INT Chain consensus message
	case msg.Code >= 0x20 && msg.Code <= 0x23:
		if handler, ok := pm.engine.(consensus.Handler); ok {
			var msgBytes []byte
			if err := msg.Decode(&msgBytes); err != nil {
				return errResp(ErrDecode, "msg %v: %v", msg, err)
			}
			handler.HandleMsg(msg.Code, p, msgBytes)
		}
	case msg.Code == StatusMsg:
		// Status messages should never arrive after the handshake
		return errResp(ErrExtraStatusMsg, "uncontrolled status message")

	// Block header query, collect the requested headers and reply
	case msg.Code == GetBlockHeadersMsg:
		// Decode the complex header query
		var query getBlockHeadersData
		if err := msg.Decode(&query); err != nil {
			return errResp(ErrDecode, "%v: %v", msg, err)
		}
		hashMode := query.Origin.Hash != (common.Hash{})

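		// Worked example (illustrative numbers): Origin.Number=100, Amount=3,
		// Skip=1, Reverse=false yields headers 100, 102 and 104, since each
		// iteration appends the current origin and then advances Skip+1
		// blocks; with Reverse=true it yields 100, 98 and 96 instead.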
		// Gather headers until the fetch or network limit is reached
		var (
			bytes   common.StorageSize
			headers []*types.Header
			unknown bool
		)
		for !unknown && len(headers) < int(query.Amount) && bytes < softResponseLimit && len(headers) < downloader.MaxHeaderFetch {
			// Retrieve the next header satisfying the query
			var origin *types.Header
			if hashMode {
				origin = pm.blockchain.GetHeaderByHash(query.Origin.Hash)
			} else {
				origin = pm.blockchain.GetHeaderByNumber(query.Origin.Number)
			}
			if origin == nil {
				break
			}
			number := origin.Number.Uint64()
			headers = append(headers, origin)
			bytes += estHeaderRlpSize

			// Advance to the next header of the query
			switch {
			case query.Origin.Hash != (common.Hash{}) && query.Reverse:
				// Hash based traversal towards the genesis block
				for i := 0; i < int(query.Skip)+1; i++ {
					if header := pm.blockchain.GetHeader(query.Origin.Hash, number); header != nil {
						query.Origin.Hash = header.ParentHash
						number--
					} else {
						unknown = true
						break
					}
				}
			case query.Origin.Hash != (common.Hash{}) && !query.Reverse:
				// Hash based traversal towards the leaf block
				var (
					current = origin.Number.Uint64()
					next    = current + query.Skip + 1
				)
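				// next wraps around on uint64 overflow, so next <= current
				// flags a maliciously large skip value instead of serving
				// wrapped-around headers.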
				if next <= current {
					infos, _ := json.MarshalIndent(p.Peer.Info(), "", "  ")
					p.Log().Warn("GetBlockHeaders skip overflow attack", "current", current, "skip", query.Skip, "next", next, "attacker", infos)
					unknown = true
				} else {
					if header := pm.blockchain.GetHeaderByNumber(next); header != nil {
						if pm.blockchain.GetBlockHashesFromHash(header.Hash(), query.Skip+1)[query.Skip] == query.Origin.Hash {
							query.Origin.Hash = header.Hash()
						} else {
							unknown = true
						}
					} else {
						unknown = true
					}
				}
			case query.Reverse:
				// Number based traversal towards the genesis block
				if query.Origin.Number >= query.Skip+1 {
					query.Origin.Number -= query.Skip + 1
				} else {
					unknown = true
				}

			case !query.Reverse:
				// Number based traversal towards the leaf block
				query.Origin.Number += query.Skip + 1
			}
		}
		return p.SendBlockHeaders(headers)

	case msg.Code == BlockHeadersMsg:
		// A batch of headers arrived in response to one of our previous requests
		var headers []*types.Header
		if err := msg.Decode(&headers); err != nil {
			return errResp(ErrDecode, "msg %v: %v", msg, err)
		}

		// Filter out any explicitly requested headers, deliver the rest to the downloader
		filter := len(headers) == 1
		if filter {
			// Irrespective of the fork checks, send the header to the fetcher just in case
			headers = pm.fetcher.FilterHeaders(p.id, headers, time.Now())
		}
		if len(headers) > 0 || !filter {
			err := pm.downloader.DeliverHeaders(p.id, headers)
			if err != nil {
				pm.logger.Debug("Failed to deliver headers", "err", err)
			}
		}

	case msg.Code == GetBlockBodiesMsg:
		// Decode the retrieval message
		msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
		if _, err := msgStream.List(); err != nil {
			return err
		}
		// Gather blocks until the fetch or network limit is reached
		var (
			hash   common.Hash
			bytes  int
			bodies []rlp.RawValue
		)
		for bytes < softResponseLimit && len(bodies) < downloader.MaxBlockFetch {
			// Retrieve the hash of the next block
			if err := msgStream.Decode(&hash); err == rlp.EOL {
				break
			} else if err != nil {
				return errResp(ErrDecode, "msg %v: %v", msg, err)
			}
			// Retrieve the requested block body, stopping if enough was found
			if data := pm.blockchain.GetBodyRLP(hash); len(data) != 0 {
				bodies = append(bodies, data)
				bytes += len(data)
			}
		}
		return p.SendBlockBodiesRLP(bodies)

	case msg.Code == BlockBodiesMsg:
		// A batch of block bodies arrived in response to one of our previous requests
		var request blockBodiesData
		if err := msg.Decode(&request); err != nil {
			return errResp(ErrDecode, "msg %v: %v", msg, err)
		}
		// Deliver them all to the downloader for queuing
		transactions := make([][]*types.Transaction, len(request))
		uncles := make([][]*types.Header, len(request))

		for i, body := range request {
			transactions[i] = body.Transactions
			uncles[i] = body.Uncles
		}
		// Filter out any explicitly requested bodies, deliver the rest to the downloader
		filter := len(transactions) > 0 || len(uncles) > 0
		if filter {
			transactions, uncles = pm.fetcher.FilterBodies(p.id, transactions, uncles, time.Now())
		}
		if len(transactions) > 0 || len(uncles) > 0 || !filter {
			err := pm.downloader.DeliverBodies(p.id, transactions, uncles)
			if err != nil {
				pm.logger.Debug("Failed to deliver bodies", "err", err)
			}
		}

	case p.version >= consensus.Eth63 && msg.Code == GetNodeDataMsg:
		// Decode the retrieval message
		msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
		if _, err := msgStream.List(); err != nil {
			return err
		}
		// Gather state data until the fetch or network limit is reached
		var (
			hash  common.Hash
			bytes int
			data  [][]byte
		)
		for bytes < softResponseLimit && len(data) < downloader.MaxStateFetch {
			// Retrieve the hash of the next state entry
			if err := msgStream.Decode(&hash); err == rlp.EOL {
				break
			} else if err != nil {
				return errResp(ErrDecode, "msg %v: %v", msg, err)
			}
			// Retrieve the requested state entry, stopping if enough was found
			if entry, err := pm.blockchain.TrieNode(hash); err == nil {
				data = append(data, entry)
				bytes += len(entry)
			}
		}
		return p.SendNodeData(data)

	case p.version >= consensus.Eth63 && msg.Code == NodeDataMsg:
		// A batch of node state data arrived in response to one of our previous requests
		var data [][]byte
		if err := msg.Decode(&data); err != nil {
			return errResp(ErrDecode, "msg %v: %v", msg, err)
		}
		// Deliver all to the downloader
		if err := pm.downloader.DeliverNodeData(p.id, data); err != nil {
			pm.logger.Debug("Failed to deliver node state data", "err", err)
		}

	case p.version >= consensus.Eth63 && msg.Code == GetReceiptsMsg:
		// Decode the retrieval message
		msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
		if _, err := msgStream.List(); err != nil {
			return err
		}
		// Gather receipts until the fetch or network limit is reached
		var (
			hash     common.Hash
			bytes    int
			receipts []rlp.RawValue
		)
		for bytes < softResponseLimit && len(receipts) < downloader.MaxReceiptFetch {
			// Retrieve the hash of the next block
			if err := msgStream.Decode(&hash); err == rlp.EOL {
				break
			} else if err != nil {
				return errResp(ErrDecode, "msg %v: %v", msg, err)
			}
			// Retrieve the requested block's receipts, skipping if unknown to us
			results := pm.blockchain.GetReceiptsByHash(hash)
			if results == nil {
				if header := pm.blockchain.GetHeaderByHash(hash); header == nil || header.ReceiptHash != types.EmptyRootHash {
					continue
				}
			}
			// If known, encode and queue for response packet
			if encoded, err := rlp.EncodeToBytes(results); err != nil {
				pm.logger.Error("Failed to encode receipt", "err", err)
			} else {
				receipts = append(receipts, encoded)
				bytes += len(encoded)
			}
		}
		return p.SendReceiptsRLP(receipts)

	case p.version >= consensus.Eth63 && msg.Code == ReceiptsMsg:
		// A batch of receipts arrived in response to one of our previous requests
		var receipts [][]*types.Receipt
		if err := msg.Decode(&receipts); err != nil {
			return errResp(ErrDecode, "msg %v: %v", msg, err)
		}
		// Deliver all to the downloader
		if err := pm.downloader.DeliverReceipts(p.id, receipts); err != nil {
			pm.logger.Debug("Failed to deliver receipts", "err", err)
		}

	case msg.Code == NewBlockHashesMsg:
		var announces newBlockHashesData
		if err := msg.Decode(&announces); err != nil {
			return errResp(ErrDecode, "%v: %v", msg, err)
		}
		// Mark the hashes as present at the remote node
		for _, block := range announces {
			p.MarkBlock(block.Hash)
		}
		// Schedule all the unknown hashes for retrieval
		unknown := make(newBlockHashesData, 0, len(announces))
		for _, block := range announces {
			if !pm.blockchain.HasBlock(block.Hash, block.Number) {
				unknown = append(unknown, block)
			}
		}
		for _, block := range unknown {
			pm.fetcher.Notify(p.id, block.Hash, block.Number, time.Now(), p.RequestOneHeader, p.RequestBodies)
		}

	case msg.Code == NewBlockMsg:
		// Retrieve and decode the propagated block
		var request newBlockData
		if err := msg.Decode(&request); err != nil {
			return errResp(ErrDecode, "%v: %v", msg, err)
		}
		request.Block.ReceivedAt = msg.ReceivedAt
		request.Block.ReceivedFrom = p

		// Mark the peer as owning the block and schedule it for import
		p.MarkBlock(request.Block.Hash())
		pm.fetcher.Enqueue(p.id, request.Block)

		// Assuming the block is importable by the peer, but possibly not yet done so,
		// calculate the head hash and TD that the peer truly must have.
		var (
			trueHead = request.Block.ParentHash()
			trueTD   = new(big.Int).Sub(request.TD, request.Block.Difficulty())
		)
		// Update the peer's total difficulty if better than the previous
		if _, td := p.Head(); trueTD.Cmp(td) > 0 {
			p.SetHead(trueHead, trueTD)

			// Schedule a sync if above ours. Note, this will not fire a sync for a gap of
			// a single block (as the true TD is below the propagated block), however this
			// scenario should easily be covered by the fetcher.
			currentBlock := pm.blockchain.CurrentBlock()
			if trueTD.Cmp(pm.blockchain.GetTd(currentBlock.Hash(), currentBlock.NumberU64())) > 0 {
				go pm.synchronise(p)
			}
		}

	case msg.Code == TxMsg:
		// Transactions arrived, make sure we have a valid and fresh chain to handle them
		if atomic.LoadUint32(&pm.acceptTxs) == 0 {
			break
		}
		// Transactions can be processed, parse all of them and deliver to the pool
		var txs []*types.Transaction
		if err := msg.Decode(&txs); err != nil {
			return errResp(ErrDecode, "msg %v: %v", msg, err)
		}
		for i, tx := range txs {
			// Validate and mark the remote transaction
			if tx == nil {
				return errResp(ErrDecode, "transaction %d is nil", i)
			}
			p.MarkTransaction(tx.Hash())
		}
		pm.txpool.AddRemotes(txs)

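	// TX3ProofData carries proof payloads for cross-chain transactions (note
	// the Header field used below); each entry is validated through the
	// cross-chain helper, persisted, and re-emitted on the local feed so the
	// broadcast loop relays it to other peers.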
	case msg.Code == TX3ProofDataMsg:
		pm.logger.Debug("TX3ProofDataMsg received")
		var proofDatas []*types.TX3ProofData
		if err := msg.Decode(&proofDatas); err != nil {
			pm.logger.Error("TX3ProofDataMsg decode error", "msg", msg, "error", err)
			return errResp(ErrDecode, "msg %v: %v", msg, err)
		}
		for _, proofData := range proofDatas {
			// Validate and mark the remote TX3ProofData
			if err := pm.cch.ValidateTX3ProofData(proofData); err != nil {
				pm.logger.Error("TX3ProofDataMsg validate error", "msg", msg, "error", err)
				return errResp(ErrTX3ValidateFail, "msg %v: %v", msg, err)
			}
			p.MarkTX3ProofData(proofData.Header.Hash())
			// Write the remote TX3ProofData
			if err := pm.cch.WriteTX3ProofData(proofData); err != nil {
				pm.logger.Error("TX3ProofDataMsg write error", "msg", msg, "error", err)
			}

			go pm.tx3PrfDtFeed.Send(core.Tx3ProofDataEvent{Tx3PrfDt: proofData})
		}

	case msg.Code == GetPreImagesMsg:
		pm.preimageLogger.Debug("GetPreImagesMsg received")
		// Decode the retrieval message
		msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
		if _, err := msgStream.List(); err != nil {
			return err
		}
		// Gather preimages until the fetch or network limit is reached
		var (
			hash      common.Hash
			bytes     int
			preimages [][]byte
		)

		for bytes < softResponseLimit && len(preimages) < downloader.MaxReceiptFetch {
			// Retrieve the hash of the next requested preimage
			if err := msgStream.Decode(&hash); err == rlp.EOL {
				break
			} else if err != nil {
				return errResp(ErrDecode, "msg %v: %v", msg, err)
			}
			// Retrieve the requested preimage from the local database
			preimage := rawdb.ReadPreimage(pm.blockchain.StateCache().TrieDB().DiskDB(), hash)
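			// A hash with no locally stored preimage yields nil here, which
			// fails the check below, so unknown hashes are skipped as well.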
			// Double check the local preimage
			if hash != crypto.Keccak256Hash(preimage) {
				pm.preimageLogger.Errorf("Failed to pass the preimage double check. Request hash %x, Local Preimage %x", hash, preimage)
				continue
			}

			preimages = append(preimages, preimage)
			bytes += len(preimage)
		}
		return p.SendPreimagesRLP(preimages)

	case msg.Code == PreImagesMsg:
		pm.preimageLogger.Debug("PreImagesMsg received")
		var preimages [][]byte
		if err := msg.Decode(&preimages); err != nil {
			return errResp(ErrDecode, "msg %v: %v", msg, err)
		}

		preimagesMap := make(map[common.Hash][]byte)
		for _, preimage := range preimages {
			pm.preimageLogger.Debugf("PreImagesMsg received: %x", preimage)
			preimagesMap[crypto.Keccak256Hash(preimage)] = common.CopyBytes(preimage)
		}
		if len(preimagesMap) > 0 {
			db, _ := pm.blockchain.StateCache().TrieDB().DiskDB().(intdb.Database)
			rawdb.WritePreimages(db, preimagesMap)
			pm.preimageLogger.Info("PreImages written into database")
		}

	case msg.Code == TrieNodeDataMsg:
		pm.logger.Debug("TrieNodeDataMsg received")
		var trienodes [][]byte
		if err := msg.Decode(&trienodes); err != nil {
			pm.logger.Warnf("Unable to decode TrieNodeData: %v", err)
			return errResp(ErrDecode, "msg %v: %v", msg, err)
		}
		pm.logger.Debugf("%d TrieNodeData received", len(trienodes))

		db, _ := pm.blockchain.StateCache().TrieDB().DiskDB().(intdb.Database)
		for _, tnode := range trienodes {
			thash := crypto.Keccak256Hash(tnode)
			if has, herr := db.Has(thash.Bytes()); !has && herr == nil {
				puterr := db.Put(thash.Bytes(), tnode)
				if puterr == nil {
					pm.logger.Debugf("Insert TrieNodeData %x", thash)
				}
			} else if has {
				pm.logger.Debugf("TrieNodeData %x already exists", thash)
			}
		}

	default:
		return errResp(ErrInvalidMsgCode, "%v", msg.Code)
	}
	return nil
}

func (pm *ProtocolManager) Enqueue(id string, block *types.Block) {
	pm.fetcher.Enqueue(id, block)
}

// BroadcastBlock will either propagate a block to a subset of its peers, or
// will only announce its availability (depending on what's requested).
func (pm *ProtocolManager) BroadcastBlock(block *types.Block, propagate bool) {
	hash := block.Hash()
	peers := pm.peers.PeersWithoutBlock(hash)

	// If propagation is requested, send the block to a subset of our peers
	if propagate {
		// Calculate the TD of the block (it's not imported yet, so block.Td is not valid)
		var td *big.Int
		if parent := pm.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1); parent != nil {
			td = new(big.Int).Add(block.Difficulty(), pm.blockchain.GetTd(block.ParentHash(), block.NumberU64()-1))
		} else {
			pm.logger.Error("Propagating dangling block", "number", block.Number(), "hash", hash)
			return
		}
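		// Sending to √n of our n peers keeps the upload cost of propagation
		// sub-linear; the remaining peers learn of the block through the
		// separate hash-announcement pass (see minedBroadcastLoop).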
		// Send the block to a subset of our peers
		transfer := peers[:int(math.Sqrt(float64(len(peers))))]
		for _, peer := range transfer {
			peer.SendNewBlock(block, td)
		}
		pm.logger.Trace("Propagated block", "hash", hash, "recipients", len(transfer), "duration", common.PrettyDuration(time.Since(block.ReceivedAt)))
		return
	}
	// Otherwise, if the block is indeed in our own chain, announce it
	if pm.blockchain.HasBlock(hash, block.NumberU64()) {
		for _, peer := range peers {
			peer.SendNewBlockHashes([]common.Hash{hash}, []uint64{block.NumberU64()})
		}
		pm.logger.Trace("Announced block", "hash", hash, "recipients", len(peers), "duration", common.PrettyDuration(time.Since(block.ReceivedAt)))
	}
}

// BroadcastTx will propagate a transaction to all peers which are not known to
// already have the given transaction.
func (pm *ProtocolManager) BroadcastTx(hash common.Hash, tx *types.Transaction) {
	// Broadcast transaction to a batch of peers not knowing about it
	peers := pm.peers.PeersWithoutTx(hash)
	//FIXME include this again: peers = peers[:int(math.Sqrt(float64(len(peers))))]
	for _, peer := range peers {
		peer.SendTransactions(types.Transactions{tx})
	}
	pm.logger.Trace("Broadcast transaction", "hash", hash, "recipients", len(peers))
}

// BroadcastTX3ProofData will propagate a TX3ProofData to all peers which are not known to
// already have the given TX3ProofData.
func (pm *ProtocolManager) BroadcastTX3ProofData(hash common.Hash, proofData *types.TX3ProofData) {
	// Broadcast TX3ProofData to a batch of peers not knowing about it
	peers := pm.peers.PeersWithoutTX3ProofData(hash)
	for _, peer := range peers {
		peer.SendTX3ProofData([]*types.TX3ProofData{proofData})
	}
	pm.logger.Trace("Broadcast TX3ProofData", "hash", hash, "recipients", len(peers))
}

func (pm *ProtocolManager) BroadcastMessage(msgcode uint64, data interface{}) {
	recipients := 0
	for _, peer := range pm.peers.Peers() {
		peer.Send(msgcode, data)
		recipients++
	}
	pm.logger.Trace("Broadcast p2p message", "code", msgcode, "recipients", recipients, "msg", data)
}

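// TryFixBadPreimages scans the locally stored sha3 preimages, collects any
// whose value no longer hashes back to its key, and requests the correct
// preimages for those keys from the best-known peer.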
func (pm *ProtocolManager) TryFixBadPreimages() {
	// Record all preimages (Testing)
	images := make(map[common.Hash][]byte)

	var hashes []common.Hash

	// Iterate over all stored sha3 preimages for checking
	db, _ := pm.blockchain.StateCache().TrieDB().DiskDB().(intdb.Database)
	it := db.NewIteratorWithPrefix([]byte("secure-key-"))
	for it.Next() {
		keyHash := common.BytesToHash(it.Key())
		valueHash := crypto.Keccak256Hash(it.Value())
		if keyHash != valueHash {
			// If the value's hash doesn't match the key, record it so the
			// correct preimage can be requested from another peer
			hashes = append(hashes, keyHash)
		}
		// Add to all preimages (Testing)
		images[keyHash] = common.CopyBytes(it.Value())
	}
	it.Release()

	if len(hashes) > 0 {
		pm.preimageLogger.Critf("Found %d Bad Preimage(s)", len(hashes))
		pm.preimageLogger.Critf("Bad Preimages: %x", hashes)

		// Print all preimages (Testing)
		//pm.preimageLogger.Crit("All Preimage(s)")
		//var list []common.Hash
		//for k := range images {
		//	list = append(list, k)
		//}
		//sort.Slice(list, func(i, j int) bool {
		//	return bytes.Compare(list[i][:], list[j][:]) == 1
		//})
		//for _, k := range list {
		//	pm.preimageLogger.Crit(k.Hex(), common.Bytes2Hex(images[k]))
		//}

		//panic("Stop the system when found bad preimages, for testing purpose")

		// Guard against an empty peer set before requesting the fixes
		if best := pm.peers.BestPeer(); best != nil {
			best.RequestPreimages(hashes)
		}
	}
}

// Mined broadcast loop
func (self *ProtocolManager) minedBroadcastLoop() {
	// automatically stops when unsubscribed
	for obj := range self.minedBlockSub.Chan() {
		switch ev := obj.Data.(type) {
		case core.NewMinedBlockEvent:
			self.BroadcastBlock(ev.Block, true)  // First propagate block to peers
			self.BroadcastBlock(ev.Block, false) // Only then announce to the rest
		}
	}
}

func (self *ProtocolManager) txBroadcastLoop() {
	for {
		select {
		case event := <-self.txCh:
			self.BroadcastTx(event.Tx.Hash(), event.Tx)

		// Err() channel will be closed when unsubscribing.
		case <-self.txSub.Err():
			return
		}
	}
}

func (self *ProtocolManager) tx3PrfDtBroadcastLoop() {
	for {
		select {
		case event := <-self.tx3PrfDtCh:
			self.BroadcastTX3ProofData(event.Tx3PrfDt.Header.Hash(), event.Tx3PrfDt)

		// Err() channel will be closed when unsubscribing.
		case <-self.tx3PrfDtSub.Err():
			return
		}
	}
}

// NodeInfo represents a short summary of the INT Chain sub-protocol metadata
// known about the host peer.
type NodeInfo struct {
	Network    uint64              `json:"network"`    // INT Chain network ID
	Difficulty *big.Int            `json:"difficulty"` // Total difficulty of the host's blockchain
	Genesis    common.Hash         `json:"genesis"`    // SHA3 hash of the host's genesis block
	Config     *params.ChainConfig `json:"config"`     // Chain configuration for the fork rules
	Head       common.Hash         `json:"head"`       // SHA3 hash of the host's best owned block
}

// NodeInfo retrieves some protocol metadata about the running host node.
func (self *ProtocolManager) NodeInfo() *NodeInfo {
	currentBlock := self.blockchain.CurrentBlock()
	return &NodeInfo{
		Network:    self.networkId,
		Difficulty: self.blockchain.GetTd(currentBlock.Hash(), currentBlock.NumberU64()),
		Genesis:    self.blockchain.Genesis().Hash(),
		Config:     self.blockchain.Config(),
		Head:       currentBlock.Hash(),
	}
}

func (self *ProtocolManager) FindPeers(targets map[common.Address]bool) map[common.Address]consensus.Peer {
	m := make(map[common.Address]consensus.Peer)
	for _, p := range self.peers.Peers() {
		pubKey, err := p.ID().Pubkey()
		if err != nil {
			continue
		}
		addr := crypto.PubkeyToAddress(*pubKey)
		if targets[addr] {
			m[addr] = p
		}
	}
	return m
}