github.com/klaytn/klaytn@v1.12.1/node/cn/handler.go

     1  // Modifications Copyright 2018 The klaytn Authors
     2  // Copyright 2015 The go-ethereum Authors
     3  // This file is part of go-ethereum.
     4  //
     5  // The go-ethereum library is free software: you can redistribute it and/or modify
     6  // it under the terms of the GNU Lesser General Public License as published by
     7  // the Free Software Foundation, either version 3 of the License, or
     8  // (at your option) any later version.
     9  //
    10  // The go-ethereum library is distributed in the hope that it will be useful,
    11  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    12  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    13  // GNU Lesser General Public License for more details.
    14  //
    15  // You should have received a copy of the GNU Lesser General Public License
    16  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    17  //
    18  // This file is derived from eth/handler.go (2018/06/04).
    19  // Modified and improved for the klaytn development.
    20  
    21  package cn
    22  
    23  import (
    24  	"encoding/json"
    25  	"errors"
    26  	"fmt"
    27  	"math"
    28  	"math/big"
    29  	"math/rand"
    30  	"runtime/debug"
    31  	"sort"
    32  	"sync"
    33  	"sync/atomic"
    34  	"time"
    35  
    36  	"github.com/klaytn/klaytn/blockchain"
    37  	"github.com/klaytn/klaytn/blockchain/types"
    38  	"github.com/klaytn/klaytn/common"
    39  	"github.com/klaytn/klaytn/consensus"
    40  	"github.com/klaytn/klaytn/consensus/istanbul"
    41  	"github.com/klaytn/klaytn/crypto"
    42  	"github.com/klaytn/klaytn/datasync/downloader"
    43  	"github.com/klaytn/klaytn/datasync/fetcher"
    44  	"github.com/klaytn/klaytn/event"
    45  	"github.com/klaytn/klaytn/networks/p2p"
    46  	"github.com/klaytn/klaytn/networks/p2p/discover"
    47  	"github.com/klaytn/klaytn/node/cn/snap"
    48  	"github.com/klaytn/klaytn/params"
    49  	"github.com/klaytn/klaytn/reward"
    50  	"github.com/klaytn/klaytn/rlp"
    51  	"github.com/klaytn/klaytn/storage/database"
    52  	"github.com/klaytn/klaytn/storage/statedb"
    53  	"github.com/klaytn/klaytn/work"
    54  )
    55  
    56  const (
    57  	softResponseLimit = 2 * 1024 * 1024 // Target maximum size of returned blocks, headers or node data.
    58  	estHeaderRlpSize  = 500             // Approximate size of an RLP encoded block header
    59  
    60  	// txChanSize is the size of channel listening to NewTxsEvent.
     61  	// The number is chosen with reference to the size of the tx pool.
    62  	txChanSize = 4096
    63  
    64  	concurrentPerPeer  = 3
    65  	channelSizePerPeer = 20
    66  
     67  	blockReceivingPNLimit  = 5 // maximum number of PNs to which a CN propagates a new block.
     68  	minNumPeersToSendBlock = 3 // minimum number of peers to which a node propagates a new block.
    69  
     70  	// DefaultMaxResendTxCount is the maximum number of transactions to resend to a peer, to keep them from going missing.
    71  	DefaultMaxResendTxCount = 1000
    72  
     73  	// DefaultTxResendInterval is the transaction resend interval in seconds.
    74  	DefaultTxResendInterval = 4
    75  
     76  	// ExtraNonSnapPeers is the number of additional non-snap peers allowed to connect beyond the number of snap peers.
    77  	ExtraNonSnapPeers = 5
    78  )
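
// For illustration, txResendLoop (below) turns the resend constants above into
// a resend ticker and a per-tick batch bound, roughly:
//
//	tick := time.Duration(DefaultTxResendInterval) * time.Second          // 4s period
//	pending := pm.txpool.CachedPendingTxsByCount(DefaultMaxResendTxCount) // at most 1000 txs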
    79  
    80  // errIncompatibleConfig is returned if the requested protocols and configs are
    81  // not compatible (low protocol version restrictions and high requirements).
    82  var errIncompatibleConfig = errors.New("incompatible configuration")
    83  
    84  var (
    85  	errUnknownProcessingError  = errors.New("unknown error during the msg processing")
    86  	errUnsupportedEnginePolicy = errors.New("unsupported engine or policy")
    87  )
    88  
    89  func errResp(code errCode, format string, v ...interface{}) error {
    90  	return fmt.Errorf("%v - %v", code, fmt.Sprintf(format, v...))
    91  }
    92  
    93  type ProtocolManager struct {
    94  	networkId uint64
    95  
    96  	fastSync  uint32 // Flag whether fast sync is enabled (gets disabled if we already have blocks)
    97  	snapSync  uint32 // Flag whether fast sync should operate on top of the snap protocol
    98  	acceptTxs uint32 // Flag whether we're considered synchronised (enables transaction processing)
    99  
   100  	txpool      work.TxPool
   101  	blockchain  work.BlockChain
   102  	chainconfig *params.ChainConfig
   103  	maxPeers    int
   104  
   105  	downloader ProtocolManagerDownloader
   106  	fetcher    ProtocolManagerFetcher
   107  	peers      PeerSet
   108  
   109  	SubProtocols []p2p.Protocol
   110  
   111  	eventMux      *event.TypeMux
   112  	txsCh         chan blockchain.NewTxsEvent
   113  	txsSub        event.Subscription
   114  	minedBlockSub *event.TypeMuxSubscription
   115  
   116  	// channels for fetcher, syncer, txsyncLoop
   117  	newPeerCh   chan Peer
   118  	txsyncCh    chan *txsync
   119  	quitSync    chan struct{}
   120  	noMorePeers chan struct{}
   121  
   122  	quitResendCh chan struct{}
   123  	// wait group is used for graceful shutdowns during downloading
   124  	// and processing
   125  	wg     sync.WaitGroup
   126  	peerWg sync.WaitGroup
   127  	// istanbul BFT
   128  	engine consensus.Engine
   129  
   130  	wsendpoint string
   131  
   132  	nodetype          common.ConnType
   133  	txResendUseLegacy bool
   134  
   135  	// syncStop is a flag to stop peer sync
   136  	syncStop int32
   137  }
   138  
    139  // NewProtocolManager returns a new Klaytn sub protocol manager. The Klaytn sub protocol manages peers capable
    140  // of interoperating with the Klaytn network.
   141  func NewProtocolManager(config *params.ChainConfig, mode downloader.SyncMode, networkId uint64, mux *event.TypeMux,
   142  	txpool work.TxPool, engine consensus.Engine, blockchain work.BlockChain, chainDB database.DBManager, cacheLimit int,
   143  	nodetype common.ConnType, cnconfig *Config,
   144  ) (*ProtocolManager, error) {
    145  	// Create the protocol manager with the base fields
   146  	manager := &ProtocolManager{
   147  		networkId:         networkId,
   148  		eventMux:          mux,
   149  		txpool:            txpool,
   150  		blockchain:        blockchain,
   151  		chainconfig:       config,
   152  		peers:             newPeerSet(),
   153  		newPeerCh:         make(chan Peer),
   154  		noMorePeers:       make(chan struct{}),
   155  		txsyncCh:          make(chan *txsync),
   156  		quitSync:          make(chan struct{}),
   157  		quitResendCh:      make(chan struct{}),
   158  		engine:            engine,
   159  		nodetype:          nodetype,
   160  		txResendUseLegacy: cnconfig.TxResendUseLegacy,
   161  	}
   162  
   163  	// istanbul BFT
   164  	if handler, ok := engine.(consensus.Handler); ok {
   165  		handler.SetBroadcaster(manager, manager.nodetype)
   166  	}
   167  
   168  	// Figure out whether to allow fast sync or not
   169  	if (mode == downloader.FastSync || mode == downloader.SnapSync) && blockchain.CurrentBlock().NumberU64() > 0 {
   170  		logger.Error("Blockchain not empty, fast sync disabled")
   171  		mode = downloader.FullSync
   172  	}
   173  	if mode == downloader.FastSync {
   174  		manager.fastSync = uint32(1)
   175  		manager.snapSync = uint32(0)
   176  	}
   177  	if mode == downloader.SnapSync {
   178  		manager.fastSync = uint32(0)
   179  		manager.snapSync = uint32(1)
   180  	}
   181  	// istanbul BFT
   182  	protocol := engine.Protocol()
   183  	// Initiate a sub-protocol for every implemented version we can handle
   184  	manager.SubProtocols = make([]p2p.Protocol, 0, len(protocol.Versions))
   185  	for i, version := range protocol.Versions {
   186  		// Skip protocol version if incompatible with the mode of operation
   187  		if mode == downloader.FastSync && version < klay63 {
   188  			continue
   189  		}
   190  		// TODO-Klaytn-Snapsync add snapsync and version check here
   191  		if mode == downloader.SnapSync && version < klay65 {
   192  			continue
   193  		}
   194  		// Compatible; initialise the sub-protocol
   195  		version := version
   196  		manager.SubProtocols = append(manager.SubProtocols, p2p.Protocol{
   197  			Name:    protocol.Name,
   198  			Version: version,
   199  			Length:  protocol.Lengths[i],
   200  			Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
   201  				peer := manager.newPeer(int(version), p, rw)
   202  				pubKey, err := p.ID().Pubkey()
   203  				if err != nil {
   204  					if p.ConnType() == common.CONSENSUSNODE {
   205  						return err
   206  					}
   207  					peer.SetAddr(common.Address{})
   208  				} else {
   209  					addr := crypto.PubkeyToAddress(*pubKey)
   210  					peer.SetAddr(addr)
   211  				}
   212  				select {
   213  				case manager.newPeerCh <- peer:
   214  					manager.wg.Add(1)
   215  					defer manager.wg.Done()
   216  					return manager.handle(peer)
   217  				case <-manager.quitSync:
   218  					return p2p.DiscQuitting
   219  				}
   220  			},
   221  			RunWithRWs: func(p *p2p.Peer, rws []p2p.MsgReadWriter) error {
   222  				peer, err := manager.newPeerWithRWs(int(version), p, rws)
   223  				if err != nil {
   224  					return err
   225  				}
   226  				pubKey, err := p.ID().Pubkey()
   227  				if err != nil {
   228  					if p.ConnType() == common.CONSENSUSNODE {
   229  						return err
   230  					}
   231  					peer.SetAddr(common.Address{})
   232  				} else {
   233  					addr := crypto.PubkeyToAddress(*pubKey)
   234  					peer.SetAddr(addr)
   235  				}
   236  				select {
   237  				case manager.newPeerCh <- peer:
   238  					manager.wg.Add(1)
   239  					defer manager.wg.Done()
   240  					return peer.Handle(manager)
   241  				case <-manager.quitSync:
   242  					return p2p.DiscQuitting
   243  				}
   244  			},
   245  			NodeInfo: func() interface{} {
   246  				return manager.NodeInfo()
   247  			},
   248  			PeerInfo: func(id discover.NodeID) interface{} {
   249  				if p := manager.peers.Peer(fmt.Sprintf("%x", id[:8])); p != nil {
   250  					return p.Info()
   251  				}
   252  				return nil
   253  			},
   254  		})
   255  
   256  		if cnconfig.SnapshotCacheSize > 0 {
   257  			for _, version := range snap.ProtocolVersions {
   258  				manager.SubProtocols = append(manager.SubProtocols, p2p.Protocol{
   259  					Name:    snap.ProtocolName,
   260  					Version: version,
   261  					Length:  snap.ProtocolLengths[version],
   262  					Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
   263  						manager.wg.Add(1)
   264  						defer manager.wg.Done()
   265  						peer := snap.NewPeer(version, p, rw)
   266  						return manager.handleSnapPeer(peer)
   267  					},
   268  					RunWithRWs: func(p *p2p.Peer, rws []p2p.MsgReadWriter) error {
   269  						manager.wg.Add(1)
   270  						defer manager.wg.Done()
   271  						peer := snap.NewPeer(version, p, rws[p2p.ConnDefault])
   272  						return manager.handleSnapPeer(peer)
   273  					},
   274  					NodeInfo: func() interface{} {
   275  						return manager.NodeInfo()
   276  					},
   277  					PeerInfo: func(id discover.NodeID) interface{} {
   278  						if p := manager.peers.Peer(fmt.Sprintf("%x", id[:8])); p != nil {
   279  							return p.Info()
   280  						}
   281  						return nil
   282  					},
   283  				})
   284  			}
   285  		}
   286  	}
   287  
   288  	if len(manager.SubProtocols) == 0 {
   289  		return nil, errIncompatibleConfig
   290  	}
   291  
   292  	// Create and set downloader
   293  	if cnconfig.DownloaderDisable {
   294  		manager.downloader = downloader.NewFakeDownloader()
   295  	} else {
    296  		// Construct the downloader (long sync) and its backing state bloom if fast
    297  		// sync is requested. The downloader is responsible for deallocating the state
    298  		// bloom when it's done.
    299  		//
    300  		// Note: we don't enable the bloom if snap sync is performed, since it's very
    301  		// heavy and the heal portion of snap sync is much lighter than fast sync.
    302  		// What we particularly want to avoid is a 90%-finished (but restarted)
    303  		// snap sync beginning to index the entire trie.
   304  		var stateBloom *statedb.SyncBloom
   305  		if atomic.LoadUint32(&manager.fastSync) == 1 && atomic.LoadUint32(&manager.snapSync) == 0 {
   306  			stateBloom = statedb.NewSyncBloom(uint64(cacheLimit), chainDB.GetStateTrieDB())
   307  		}
   308  		var proposerPolicy uint64
   309  		if config.Istanbul != nil {
   310  			proposerPolicy = config.Istanbul.ProposerPolicy
   311  		}
   312  		manager.downloader = downloader.New(mode, chainDB, stateBloom, manager.eventMux, blockchain, nil, manager.removePeer, proposerPolicy)
   313  	}
   314  
   315  	// Create and set fetcher
   316  	if cnconfig.FetcherDisable {
   317  		manager.fetcher = fetcher.NewFakeFetcher()
   318  	} else {
   319  		validator := func(header *types.Header) error {
   320  			return engine.VerifyHeader(blockchain, header, true)
   321  		}
   322  		heighter := func() uint64 {
   323  			return blockchain.CurrentBlock().NumberU64()
   324  		}
   325  		inserter := func(blocks types.Blocks) (int, error) {
   326  			// If fast sync is running, deny importing weird blocks
   327  			if atomic.LoadUint32(&manager.fastSync) == 1 || atomic.LoadUint32(&manager.snapSync) == 1 {
   328  				logger.Warn("Discarded bad propagated block", "number", blocks[0].Number(), "hash", blocks[0].Hash())
   329  				return 0, nil
   330  			}
   331  			atomic.StoreUint32(&manager.acceptTxs, 1) // Mark initial sync done on any fetcher import
   332  			return manager.blockchain.InsertChain(blocks)
   333  		}
   334  		manager.fetcher = fetcher.New(blockchain.GetBlockByHash, validator, manager.BroadcastBlock, manager.BroadcastBlockHash, heighter, inserter, manager.removePeer)
   335  	}
   336  
   337  	if manager.useTxResend() {
   338  		go manager.txResendLoop(cnconfig.TxResendInterval, cnconfig.TxResendCount)
   339  	}
   340  	return manager, nil
   341  }
   342  
   343  // istanbul BFT
   344  func (pm *ProtocolManager) RegisterValidator(connType common.ConnType, validator p2p.PeerTypeValidator) {
   345  	pm.peers.RegisterValidator(connType, validator)
   346  }
   347  
   348  func (pm *ProtocolManager) getWSEndPoint() string {
   349  	return pm.wsendpoint
   350  }
   351  
   352  func (pm *ProtocolManager) removePeer(id string) {
   353  	// Short circuit if the peer was already removed
   354  	peer := pm.peers.Peer(id)
   355  	if peer == nil {
   356  		return
   357  	}
   358  	logger.Debug("Removing Klaytn peer", "peer", id)
   359  	if peer.ExistSnapExtension() {
   360  		pm.downloader.GetSnapSyncer().Unregister(id)
   361  	}
   362  
   363  	// Unregister the peer from the downloader and peer set
   364  	pm.downloader.UnregisterPeer(id)
   365  	if err := pm.peers.Unregister(id); err != nil {
   366  		logger.Error("Peer removal failed", "peer", id, "err", err)
   367  	}
   368  	// Hard disconnect at the networking layer
   369  	if peer != nil {
   370  		peer.GetP2PPeer().Disconnect(p2p.DiscUselessPeer)
   371  	}
   372  }
   373  
   374  // getChainID returns the current chain id.
   375  func (pm *ProtocolManager) getChainID() *big.Int {
   376  	return pm.blockchain.Config().ChainID
   377  }
   378  
   379  func (pm *ProtocolManager) Start(maxPeers int) {
   380  	pm.maxPeers = maxPeers
   381  
   382  	// broadcast transactions
   383  	pm.txsCh = make(chan blockchain.NewTxsEvent, txChanSize)
   384  	pm.txsSub = pm.txpool.SubscribeNewTxsEvent(pm.txsCh)
   385  	go pm.txBroadcastLoop()
   386  
   387  	// broadcast mined blocks
   388  	pm.minedBlockSub = pm.eventMux.Subscribe(blockchain.NewMinedBlockEvent{})
   389  	go pm.minedBroadcastLoop()
   390  
   391  	// start sync handlers
   392  	go pm.syncer()
   393  	go pm.txsyncLoop()
   394  }
   395  
   396  func (pm *ProtocolManager) Stop() {
   397  	logger.Info("Stopping Klaytn protocol")
   398  
   399  	pm.txsSub.Unsubscribe()        // quits txBroadcastLoop
   400  	pm.minedBlockSub.Unsubscribe() // quits blockBroadcastLoop
   401  
   402  	// Quit the sync loop.
   403  	// After this send has completed, no new peers will be accepted.
   404  	pm.noMorePeers <- struct{}{}
   405  
   406  	if pm.useTxResend() {
   407  		// Quit resend loop
   408  		pm.quitResendCh <- struct{}{}
   409  	}
   410  
   411  	// Quit fetcher, txsyncLoop.
   412  	close(pm.quitSync)
   413  
   414  	// Disconnect existing sessions.
   415  	// This also closes the gate for any new registrations on the peer set.
    416  	// Sessions which are already established but not added to pm.peers yet
   417  	// will exit when they try to register.
   418  	pm.peers.Close()
   419  
   420  	// Wait for all peer handler goroutines and the loops to come down.
   421  	pm.wg.Wait()
   422  
   423  	logger.Info("Klaytn protocol stopped")
   424  }
   425  
    426  // SetSyncStop sets the value of the syncStop flag. If the flag is true, the peer sync process does not proceed.
   427  func (pm *ProtocolManager) SetSyncStop(flag bool) {
   428  	var i int32 = 0
   429  	if flag {
   430  		i = 1
   431  	}
    432  	atomic.StoreInt32(&pm.syncStop, i)
   433  }
   434  
   435  func (pm *ProtocolManager) GetSyncStop() bool {
   436  	if atomic.LoadInt32(&(pm.syncStop)) != 0 {
   437  		return true
   438  	}
   439  	return false
   440  }
   441  
   442  func (pm *ProtocolManager) newPeer(pv int, p *p2p.Peer, rw p2p.MsgReadWriter) Peer {
   443  	return newPeer(pv, p, newMeteredMsgWriter(rw))
   444  }
   445  
   446  // newPeerWithRWs creates a new Peer object with a slice of p2p.MsgReadWriter.
   447  func (pm *ProtocolManager) newPeerWithRWs(pv int, p *p2p.Peer, rws []p2p.MsgReadWriter) (Peer, error) {
   448  	meteredRWs := make([]p2p.MsgReadWriter, 0, len(rws))
   449  	for _, rw := range rws {
   450  		meteredRWs = append(meteredRWs, newMeteredMsgWriter(rw))
   451  	}
   452  	return newPeerWithRWs(pv, p, meteredRWs)
   453  }
   454  
   455  func (pm *ProtocolManager) handleSnapPeer(peer *snap.Peer) error {
   456  	pm.peerWg.Add(1)
   457  	defer pm.peerWg.Done()
   458  
   459  	if err := pm.peers.RegisterSnapExtension(peer); err != nil {
   460  		peer.Log().Warn("Snapshot extension registration failed", "err", err)
   461  		return err
   462  	}
   463  
   464  	return snap.Handle(pm.blockchain, pm.downloader, peer)
   465  }
   466  
   467  // handle is the callback invoked to manage the life cycle of a Klaytn peer. When
   468  // this function terminates, the peer is disconnected.
   469  func (pm *ProtocolManager) handle(p Peer) error {
   470  	// If the peer has a `snap` extension, wait for it to connect so we can have
   471  	// a uniform initialization/teardown mechanism
   472  	snap, err := pm.peers.WaitSnapExtension(p)
   473  	if err != nil {
   474  		p.GetP2PPeer().Log().Error("Snapshot extension barrier failed", "err", err)
   475  		return err
   476  	}
   477  
   478  	// Ignore maxPeers if this is a trusted peer
   479  	if pm.peers.Len() >= pm.maxPeers && !p.GetP2PPeer().Info().Networks[p2p.ConnDefault].Trusted {
   480  		return p2p.DiscTooManyPeers
   481  	}
   482  	p.GetP2PPeer().Log().Debug("Klaytn peer connected", "name", p.GetP2PPeer().Name())
   483  
   484  	pm.peerWg.Add(1)
   485  	defer pm.peerWg.Done()
   486  
   487  	// Execute the handshake
   488  	var (
   489  		genesis = pm.blockchain.Genesis()
   490  		head    = pm.blockchain.CurrentHeader()
   491  		hash    = head.Hash()
   492  		number  = head.Number.Uint64()
   493  		td      = pm.blockchain.GetTd(hash, number)
   494  	)
   495  
   496  	if err := p.Handshake(pm.networkId, pm.getChainID(), td, hash, genesis.Hash()); err != nil {
   497  		p.GetP2PPeer().Log().Debug("Klaytn peer handshake failed", "err", err)
   498  		return err
   499  	}
   500  	reject := false
   501  	if atomic.LoadUint32(&pm.snapSync) == 1 {
   502  		if snap == nil {
   503  			// If we are running snap-sync, we want to reserve roughly half the peer
   504  			// slots for peers supporting the snap protocol.
    505  			// The logic here is: we only allow up to ExtraNonSnapPeers more non-snap peers than snap peers.
   506  			if all, snp := pm.peers.Len(), pm.peers.SnapLen(); all-snp > snp+ExtraNonSnapPeers {
   507  				reject = true
   508  			}
   509  		}
   510  	}
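	// Worked example of the bound above: with 20 connected peers of which 5
	// support snap, all-snp = 15 exceeds snp+ExtraNonSnapPeers = 10, so a new
	// non-snap peer is rejected until more snap peers connect.
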
   511  	// Ignore maxPeers if this is a trusted peer
    512  	if !p.GetP2PPeer().Info().Networks[p2p.ConnDefault].Trusted {
   513  		if reject || pm.peers.Len() >= pm.maxPeers {
   514  			return p2p.DiscTooManyPeers
   515  		}
   516  	}
   517  
   518  	if rw, ok := p.GetRW().(*meteredMsgReadWriter); ok {
   519  		rw.Init(p.GetVersion())
   520  	}
   521  
   522  	// Register the peer locally
   523  	if err := pm.peers.Register(p, snap); err != nil {
    524  		// If the node is started while an account unlock is in progress, peers cannot be registered until the unlock finishes.
   525  		p.GetP2PPeer().Log().Info("Klaytn peer registration failed", "err", err)
   526  		return err
   527  	}
   528  	defer pm.removePeer(p.GetID())
   529  
   530  	// Register the peer in the downloader. If the downloader considers it banned, we disconnect
   531  	if err := pm.downloader.RegisterPeer(p.GetID(), p.GetVersion(), p); err != nil {
   532  		return err
   533  	}
   534  	if snap != nil {
   535  		if err := pm.downloader.GetSnapSyncer().Register(snap); err != nil {
   536  			p.GetP2PPeer().Log().Info("Failed to register peer in snap syncer", "err", err)
   537  			return err
   538  		}
   539  	}
   540  
    541  	// Propagate existing transactions. New transactions appearing
   542  	// after this will be sent via broadcasts.
   543  	pm.syncTransactions(p)
   544  
   545  	p.GetP2PPeer().Log().Info("Added a single channel P2P Peer", "peerID", p.GetP2PPeerID())
   546  
   547  	pubKey, err := p.GetP2PPeerID().Pubkey()
   548  	if err != nil {
   549  		return err
   550  	}
   551  	addr := crypto.PubkeyToAddress(*pubKey)
   552  
   553  	// TODO-Klaytn check global worker and peer worker
   554  	messageChannel := make(chan p2p.Msg, channelSizePerPeer)
   555  	defer close(messageChannel)
   556  	errChannel := make(chan error, channelSizePerPeer)
   557  	for w := 1; w <= concurrentPerPeer; w++ {
   558  		go pm.processMsg(messageChannel, p, addr, errChannel)
   559  	}
   560  
    561  	// Main loop: handle incoming messages.
   562  	for {
   563  		msg, err := p.GetRW().ReadMsg()
   564  		if err != nil {
   565  			p.GetP2PPeer().Log().Warn("ProtocolManager failed to read msg", "err", err)
   566  			return err
   567  		}
   568  		if msg.Size > ProtocolMaxMsgSize {
   569  			err := errResp(ErrMsgTooLarge, "%v > %v", msg.Size, ProtocolMaxMsgSize)
   570  			p.GetP2PPeer().Log().Warn("ProtocolManager over max msg size", "err", err)
   571  			return err
   572  		}
   573  
   574  		select {
   575  		case err := <-errChannel:
   576  			return err
   577  		case messageChannel <- msg:
   578  		}
   579  		// go pm.handleMsg(p, addr, msg)
   580  
   581  		//if err := pm.handleMsg(p); err != nil {
   582  		//	p.Log().Debug("Klaytn message handling failed", "err", err)
   583  		//	return err
   584  		//}
   585  	}
   586  }
   587  
   588  func (pm *ProtocolManager) processMsg(msgCh <-chan p2p.Msg, p Peer, addr common.Address, errCh chan<- error) {
   589  	defer func() {
   590  		if err := recover(); err != nil {
   591  			logger.Error("stacktrace from panic: \n" + string(debug.Stack()))
   592  			logger.Warn("the panic is recovered", "panicErr", err)
   593  			errCh <- errUnknownProcessingError
   594  		}
   595  	}()
   596  
   597  	_, fakeF := pm.fetcher.(*fetcher.FakeFetcher)
   598  	_, fakeD := pm.downloader.(*downloader.FakeDownloader)
   599  	if fakeD || fakeF {
   600  		p.GetP2PPeer().Log().Warn("ProtocolManager does not handle p2p messages", "fakeFetcher", fakeF, "fakeDownloader", fakeD)
   601  		for msg := range msgCh {
   602  			msg.Discard()
   603  		}
   604  	} else {
   605  		for msg := range msgCh {
   606  			if err := pm.handleMsg(p, addr, msg); err != nil {
   607  				p.GetP2PPeer().Log().Error("ProtocolManager failed to handle message", "msg", msg, "err", err)
   608  				errCh <- err
   609  				return
   610  			}
   611  			msg.Discard()
   612  		}
   613  	}
   614  
   615  	p.GetP2PPeer().Log().Debug("ProtocolManager.processMsg closed", "PeerName", p.GetP2PPeer().Name())
   616  }
   617  
   618  // processConsensusMsg processes the consensus message.
   619  func (pm *ProtocolManager) processConsensusMsg(msgCh <-chan p2p.Msg, p Peer, addr common.Address, errCh chan<- error) {
   620  	for msg := range msgCh {
   621  		if handler, ok := pm.engine.(consensus.Handler); ok {
   622  			_, err := handler.HandleMsg(addr, msg)
    623  			// If msg is an istanbul message, handled is true; err is nil when the message was handled successfully.
   624  			if err != nil {
   625  				p.GetP2PPeer().Log().Warn("ProtocolManager failed to handle consensus message. This can happen during block synchronization.", "msg", msg, "err", err)
   626  				errCh <- err
   627  				return
   628  			}
   629  		}
   630  		msg.Discard()
   631  	}
   632  	p.GetP2PPeer().Log().Info("ProtocolManager.processConsensusMsg closed", "PeerName", p.GetP2PPeer().Name())
   633  }
   634  
   635  // handleMsg is invoked whenever an inbound message is received from a remote
   636  // peer. The remote connection is torn down upon returning any error.
   637  func (pm *ProtocolManager) handleMsg(p Peer, addr common.Address, msg p2p.Msg) error {
   638  	// Below message size checking is done by handle().
   639  	// Read the next message from the remote peer, and ensure it's fully consumed
   640  	//msg, err := p.rw.ReadMsg()
   641  	//if err != nil {
   642  	//	return err
   643  	//}
   644  	//if msg.Size > ProtocolMaxMsgSize {
   645  	//	return errResp(ErrMsgTooLarge, "%v > %v", msg.Size, ProtocolMaxMsgSize)
   646  	//}
   647  	//defer msg.Discard()
   648  
   649  	// istanbul BFT
   650  	if handler, ok := pm.engine.(consensus.Handler); ok {
   651  		//pubKey, err := p.ID().Pubkey()
   652  		//if err != nil {
   653  		//	return err
   654  		//}
   655  		//addr := crypto.PubkeyToAddress(*pubKey)
   656  		handled, err := handler.HandleMsg(addr, msg)
    657  		// If msg is an istanbul message, handled is true; err is nil when the message was handled successfully.
   658  		if handled {
   659  			return err
   660  		}
   661  	}
   662  
   663  	// Handle the message depending on its contents
   664  	switch {
   665  	case msg.Code == StatusMsg:
   666  		// Status messages should never arrive after the handshake
   667  		return errResp(ErrExtraStatusMsg, "uncontrolled status message")
   668  
   669  		// Block header query, collect the requested headers and reply
   670  	case msg.Code == BlockHeadersRequestMsg:
   671  		if err := handleBlockHeadersRequestMsg(pm, p, msg); err != nil {
   672  			return err
   673  		}
   674  
   675  	case msg.Code == BlockHeadersMsg:
   676  		if err := handleBlockHeadersMsg(pm, p, msg); err != nil {
   677  			return err
   678  		}
   679  
   680  	case msg.Code == BlockBodiesRequestMsg:
   681  		if err := handleBlockBodiesRequestMsg(pm, p, msg); err != nil {
   682  			return err
   683  		}
   684  
   685  	case msg.Code == BlockBodiesMsg:
   686  		if err := handleBlockBodiesMsg(pm, p, msg); err != nil {
   687  			return err
   688  		}
   689  
   690  	case p.GetVersion() >= klay63 && msg.Code == NodeDataRequestMsg:
   691  		if err := handleNodeDataRequestMsg(pm, p, msg); err != nil {
   692  			return err
   693  		}
   694  
   695  	case p.GetVersion() >= klay63 && msg.Code == NodeDataMsg:
   696  		if err := handleNodeDataMsg(pm, p, msg); err != nil {
   697  			return err
   698  		}
   699  
   700  	case p.GetVersion() >= klay63 && msg.Code == ReceiptsRequestMsg:
   701  		if err := handleReceiptsRequestMsg(pm, p, msg); err != nil {
   702  			return err
   703  		}
   704  
   705  	case p.GetVersion() >= klay63 && msg.Code == ReceiptsMsg:
   706  		if err := handleReceiptsMsg(pm, p, msg); err != nil {
   707  			return err
   708  		}
   709  
   710  	case p.GetVersion() >= klay65 && msg.Code == StakingInfoRequestMsg:
   711  		if err := handleStakingInfoRequestMsg(pm, p, msg); err != nil {
   712  			return err
   713  		}
   714  
   715  	case p.GetVersion() >= klay65 && msg.Code == StakingInfoMsg:
   716  		if err := handleStakingInfoMsg(pm, p, msg); err != nil {
   717  			return err
   718  		}
   719  
   720  	case msg.Code == NewBlockHashesMsg:
   721  		if err := handleNewBlockHashesMsg(pm, p, msg); err != nil {
   722  			return err
   723  		}
   724  
   725  	case msg.Code == BlockHeaderFetchRequestMsg:
   726  		if err := handleBlockHeaderFetchRequestMsg(pm, p, msg); err != nil {
   727  			return err
   728  		}
   729  
   730  	case msg.Code == BlockHeaderFetchResponseMsg:
   731  		if err := handleBlockHeaderFetchResponseMsg(pm, p, msg); err != nil {
   732  			return err
   733  		}
   734  
   735  	case msg.Code == BlockBodiesFetchRequestMsg:
   736  		if err := handleBlockBodiesFetchRequestMsg(pm, p, msg); err != nil {
   737  			return err
   738  		}
   739  
   740  	case msg.Code == BlockBodiesFetchResponseMsg:
   741  		if err := handleBlockBodiesFetchResponseMsg(pm, p, msg); err != nil {
   742  			return err
   743  		}
   744  
   745  	case msg.Code == NewBlockMsg:
   746  		if err := handleNewBlockMsg(pm, p, msg); err != nil {
   747  			return err
   748  		}
   749  
   750  	case msg.Code == TxMsg:
   751  		if err := handleTxMsg(pm, p, msg); err != nil {
   752  			return err
   753  		}
   754  
   755  	default:
   756  		return errResp(ErrInvalidMsgCode, "%v", msg.Code)
   757  	}
   758  	return nil
   759  }
   760  
   761  // handleBlockHeadersRequestMsg handles block header request message.
   762  func handleBlockHeadersRequestMsg(pm *ProtocolManager, p Peer, msg p2p.Msg) error {
   763  	// Decode the complex header query
   764  	var query getBlockHeadersData
   765  	if err := msg.Decode(&query); err != nil {
   766  		return errResp(ErrDecode, "%v: %v", msg, err)
   767  	}
   768  	hashMode := query.Origin.Hash != (common.Hash{})
   769  
    770  	// Gather headers until the fetch or network limit is reached
   771  	var (
   772  		bytes   common.StorageSize
   773  		headers []*types.Header
   774  		unknown bool
   775  	)
   776  	for !unknown && len(headers) < int(query.Amount) && bytes < softResponseLimit && len(headers) < downloader.MaxHeaderFetch {
   777  		// Retrieve the next header satisfying the query
   778  		var origin *types.Header
   779  		if hashMode {
   780  			origin = pm.blockchain.GetHeaderByHash(query.Origin.Hash)
   781  		} else {
   782  			origin = pm.blockchain.GetHeaderByNumber(query.Origin.Number)
   783  		}
   784  		if origin == nil {
   785  			break
   786  		}
   787  		number := origin.Number.Uint64()
   788  		headers = append(headers, origin)
   789  		bytes += estHeaderRlpSize
   790  
   791  		// Advance to the next header of the query
   792  		switch {
   793  		case query.Origin.Hash != (common.Hash{}) && query.Reverse:
   794  			// Hash based traversal towards the genesis block
   795  			for i := 0; i < int(query.Skip)+1; i++ {
   796  				if header := pm.blockchain.GetHeader(query.Origin.Hash, number); header != nil {
   797  					query.Origin.Hash = header.ParentHash
   798  					number--
   799  				} else {
   800  					unknown = true
   801  					break
   802  				}
   803  			}
   804  		case query.Origin.Hash != (common.Hash{}) && !query.Reverse:
   805  			// Hash based traversal towards the leaf block
   806  			var (
   807  				current = origin.Number.Uint64()
   808  				next    = current + query.Skip + 1
   809  			)
   810  			if next <= current {
   811  				infos, _ := json.MarshalIndent(p.GetP2PPeer().Info(), "", "  ")
   812  				p.GetP2PPeer().Log().Warn("GetBlockHeaders skip overflow attack", "current", current, "skip", query.Skip, "next", next, "attacker", infos)
   813  				unknown = true
   814  			} else {
   815  				if header := pm.blockchain.GetHeaderByNumber(next); header != nil {
   816  					if pm.blockchain.GetBlockHashesFromHash(header.Hash(), query.Skip+1)[query.Skip] == query.Origin.Hash {
   817  						query.Origin.Hash = header.Hash()
   818  					} else {
   819  						unknown = true
   820  					}
   821  				} else {
   822  					unknown = true
   823  				}
   824  			}
   825  		case query.Reverse:
   826  			// Number based traversal towards the genesis block
   827  			if query.Origin.Number >= query.Skip+1 {
   828  				query.Origin.Number -= query.Skip + 1
   829  			} else {
   830  				unknown = true
   831  			}
   832  
   833  		case !query.Reverse:
   834  			// Number based traversal towards the leaf block
   835  			query.Origin.Number += query.Skip + 1
   836  		}
   837  	}
   838  	return p.SendBlockHeaders(headers)
   839  }
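
// Worked example of the traversal above: a number-based query with
// Origin.Number=100, Skip=2, Amount=3 and Reverse=false returns the headers
// of blocks 100, 103 and 106, since each step advances by Skip+1; with
// Reverse=true it would instead walk 100, 97, 94 toward the genesis block.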
   840  
   841  // handleBlockHeadersMsg handles block header response message.
   842  func handleBlockHeadersMsg(pm *ProtocolManager, p Peer, msg p2p.Msg) error {
   843  	// A batch of headers arrived to one of our previous requests
   844  	var headers []*types.Header
   845  	if err := msg.Decode(&headers); err != nil {
   846  		return errResp(ErrDecode, "msg %v: %v", msg, err)
   847  	}
   848  	if err := pm.downloader.DeliverHeaders(p.GetID(), headers); err != nil {
   849  		logger.Debug("Failed to deliver headers", "err", err)
   850  	}
   851  	return nil
   852  }
   853  
    854  // handleBlockBodiesRequest handles the common part of handleBlockBodiesRequestMsg and
    855  // handleBlockBodiesFetchRequestMsg. It decodes the message to get the list of hashes
    856  // and then returns the block bodies corresponding to those hashes.
   857  func handleBlockBodiesRequest(pm *ProtocolManager, p Peer, msg p2p.Msg) ([]rlp.RawValue, error) {
   858  	// Decode the retrieval message
   859  	msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
   860  	if _, err := msgStream.List(); err != nil {
   861  		return nil, err
   862  	}
    863  	// Gather blocks until the fetch or network limit is reached
   864  	var (
   865  		hash   common.Hash
   866  		bytes  int
   867  		bodies []rlp.RawValue
   868  	)
   869  	for bytes < softResponseLimit && len(bodies) < downloader.MaxBlockFetch {
   870  		// Retrieve the hash of the next block
   871  		if err := msgStream.Decode(&hash); err == rlp.EOL {
   872  			break
   873  		} else if err != nil {
   874  			return nil, errResp(ErrDecode, "msg %v: %v", msg, err)
   875  		}
   876  		// Retrieve the requested block body, stopping if enough was found
   877  		if data := pm.blockchain.GetBodyRLP(hash); len(data) != 0 {
   878  			bodies = append(bodies, data)
   879  			bytes += len(data)
   880  		}
   881  	}
   882  
   883  	return bodies, nil
   884  }
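
// The request payload consumed by handleBlockBodiesRequest is an RLP list of
// block hashes. A minimal sketch of what a requesting peer would encode (the
// hash values are placeholders for illustration):
//
//	hashes := []common.Hash{hashA, hashB}
//	payload, _ := rlp.EncodeToBytes(hashes)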
   885  
   886  // handleBlockBodiesRequestMsg handles block body request message.
   887  func handleBlockBodiesRequestMsg(pm *ProtocolManager, p Peer, msg p2p.Msg) error {
   888  	if bodies, err := handleBlockBodiesRequest(pm, p, msg); err != nil {
   889  		return err
   890  	} else {
   891  		return p.SendBlockBodiesRLP(bodies)
   892  	}
   893  }
   894  
    895  // handleBlockBodiesMsg handles block body response message.
   896  func handleBlockBodiesMsg(pm *ProtocolManager, p Peer, msg p2p.Msg) error {
   897  	// A batch of block bodies arrived to one of our previous requests
   898  	var request blockBodiesData
   899  	if err := msg.Decode(&request); err != nil {
   900  		return errResp(ErrDecode, "msg %v: %v", msg, err)
   901  	}
   902  	// Deliver them all to the downloader for queuing
   903  	transactions := make([][]*types.Transaction, len(request))
   904  
   905  	for i, body := range request {
   906  		transactions[i] = body.Transactions
   907  	}
   908  
   909  	err := pm.downloader.DeliverBodies(p.GetID(), transactions)
   910  	if err != nil {
   911  		logger.Debug("Failed to deliver bodies", "err", err)
   912  	}
   913  
   914  	return nil
   915  }
   916  
   917  // handleNodeDataRequestMsg handles node data request message.
   918  func handleNodeDataRequestMsg(pm *ProtocolManager, p Peer, msg p2p.Msg) error {
   919  	// Decode the retrieval message
   920  	msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
   921  	if _, err := msgStream.List(); err != nil {
   922  		return err
   923  	}
    924  	// Gather state data until the fetch or network limit is reached
   925  	var (
   926  		hash  common.Hash
   927  		bytes int
   928  		data  [][]byte
   929  	)
   930  	for bytes < softResponseLimit && len(data) < downloader.MaxStateFetch {
   931  		// Retrieve the hash of the next state entry
   932  		if err := msgStream.Decode(&hash); err == rlp.EOL {
   933  			break
   934  		} else if err != nil {
   935  			return errResp(ErrDecode, "msg %v: %v", msg, err)
   936  		}
   937  		// Retrieve the requested state entry, stopping if enough was found
   938  		// TODO-Klaytn-Snapsync now the code and trienode is mixed in the protocol level, separate these two types.
   939  		entry, err := pm.blockchain.TrieNode(hash)
   940  		if len(entry) == 0 || err != nil {
   941  			// Read the contract code with prefix only to save unnecessary lookups.
   942  			entry, err = pm.blockchain.ContractCodeWithPrefix(hash)
   943  		}
   944  		if err == nil && len(entry) > 0 {
   945  			data = append(data, entry)
   946  			bytes += len(entry)
   947  		}
   948  	}
   949  	return p.SendNodeData(data)
   950  }
   951  
   952  // handleNodeDataMsg handles node data response message.
   953  func handleNodeDataMsg(pm *ProtocolManager, p Peer, msg p2p.Msg) error {
   954  	// A batch of node state data arrived to one of our previous requests
   955  	var data [][]byte
   956  	if err := msg.Decode(&data); err != nil {
   957  		return errResp(ErrDecode, "msg %v: %v", msg, err)
   958  	}
   959  	// Deliver all to the downloader
   960  	if err := pm.downloader.DeliverNodeData(p.GetID(), data); err != nil {
   961  		logger.Debug("Failed to deliver node state data", "err", err)
   962  	}
   963  	return nil
   964  }
   965  
    966  // handleReceiptsRequestMsg handles receipt request message.
   967  func handleReceiptsRequestMsg(pm *ProtocolManager, p Peer, msg p2p.Msg) error {
   968  	// Decode the retrieval message
   969  	msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
   970  	if _, err := msgStream.List(); err != nil {
   971  		return err
   972  	}
    973  	// Gather receipts until the fetch or network limit is reached
   974  	var (
   975  		hash     common.Hash
   976  		bytes    int
   977  		receipts []rlp.RawValue
   978  	)
   979  	for bytes < softResponseLimit && len(receipts) < downloader.MaxReceiptFetch {
   980  		// Retrieve the hash of the next block
   981  		if err := msgStream.Decode(&hash); err == rlp.EOL {
   982  			break
   983  		} else if err != nil {
   984  			return errResp(ErrDecode, "msg %v: %v", msg, err)
   985  		}
   986  		// Retrieve the requested block's receipts, skipping if unknown to us
   987  		results := pm.blockchain.GetReceiptsByBlockHash(hash)
   988  		if results == nil {
   989  			if header := pm.blockchain.GetHeaderByHash(hash); header == nil || !header.EmptyReceipts() {
   990  				continue
   991  			}
   992  		}
   993  		// If known, encode and queue for response packet
   994  		if encoded, err := rlp.EncodeToBytes(results); err != nil {
   995  			logger.Error("Failed to encode receipt", "err", err)
   996  		} else {
   997  			receipts = append(receipts, encoded)
   998  			bytes += len(encoded)
   999  		}
  1000  	}
  1001  	return p.SendReceiptsRLP(receipts)
  1002  }
  1003  
  1004  // handleReceiptsMsg handles receipt response message.
  1005  func handleReceiptsMsg(pm *ProtocolManager, p Peer, msg p2p.Msg) error {
  1006  	// A batch of receipts arrived to one of our previous requests
  1007  	var receipts [][]*types.Receipt
  1008  	if err := msg.Decode(&receipts); err != nil {
  1009  		return errResp(ErrDecode, "msg %v: %v", msg, err)
  1010  	}
  1011  	// Deliver all to the downloader
  1012  	if err := pm.downloader.DeliverReceipts(p.GetID(), receipts); err != nil {
  1013  		logger.Debug("Failed to deliver receipts", "err", err)
  1014  	}
  1015  	return nil
  1016  }
  1017  
  1018  // handleStakingInfoRequestMsg handles staking information request message.
  1019  func handleStakingInfoRequestMsg(pm *ProtocolManager, p Peer, msg p2p.Msg) error {
  1020  	if pm.chainconfig.Istanbul == nil || pm.chainconfig.Istanbul.ProposerPolicy != uint64(istanbul.WeightedRandom) {
  1021  		return errResp(ErrUnsupportedEnginePolicy, "the engine is not istanbul or the policy is not weighted random")
  1022  	}
  1023  
  1024  	// Decode the retrieval message
  1025  	msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
  1026  	if _, err := msgStream.List(); err != nil {
  1027  		return err
  1028  	}
   1029  	// Gather staking information until the fetch or network limit is reached
  1030  	var (
  1031  		hash         common.Hash
  1032  		bytes        int
  1033  		stakingInfos []rlp.RawValue
  1034  	)
  1035  	for bytes < softResponseLimit && len(stakingInfos) < downloader.MaxStakingInfoFetch {
  1036  		// Retrieve the hash of the next block
  1037  		if err := msgStream.Decode(&hash); err == rlp.EOL {
  1038  			break
  1039  		} else if err != nil {
  1040  			return errResp(ErrDecode, "msg %v: %v", msg, err)
  1041  		}
  1042  
  1043  		// Retrieve the requested block's staking information, skipping if unknown to us
  1044  		header := pm.blockchain.GetHeaderByHash(hash)
  1045  		if header == nil {
  1046  			continue
  1047  		}
  1048  		result := reward.GetStakingInfoOnStakingBlock(header.Number.Uint64())
  1049  		if result == nil {
  1050  			continue
  1051  		}
  1052  		// If known, encode and queue for response packet
  1053  		if encoded, err := rlp.EncodeToBytes(result); err != nil {
  1054  			logger.Error("Failed to encode staking info", "err", err)
  1055  		} else {
  1056  			stakingInfos = append(stakingInfos, encoded)
  1057  			bytes += len(encoded)
  1058  		}
  1059  	}
  1060  	return p.SendStakingInfoRLP(stakingInfos)
  1061  }
  1062  
  1063  // handleStakingInfoMsg handles staking information response message.
  1064  func handleStakingInfoMsg(pm *ProtocolManager, p Peer, msg p2p.Msg) error {
  1065  	if pm.chainconfig.Istanbul == nil || pm.chainconfig.Istanbul.ProposerPolicy != uint64(istanbul.WeightedRandom) {
  1066  		return errResp(ErrUnsupportedEnginePolicy, "the engine is not istanbul or the policy is not weighted random")
  1067  	}
  1068  
  1069  	// A batch of stakingInfos arrived to one of our previous requests
  1070  	var stakingInfos []*reward.StakingInfo
  1071  	if err := msg.Decode(&stakingInfos); err != nil {
  1072  		return errResp(ErrDecode, "msg %v: %v", msg, err)
  1073  	}
  1074  	// Deliver all to the downloader
  1075  	if err := pm.downloader.DeliverStakingInfos(p.GetID(), stakingInfos); err != nil {
  1076  		logger.Debug("Failed to deliver staking information", "err", err)
  1077  	}
  1078  	return nil
  1079  }
  1080  
  1081  // handleNewBlockHashesMsg handles new block hashes message.
  1082  func handleNewBlockHashesMsg(pm *ProtocolManager, p Peer, msg p2p.Msg) error {
  1083  	var (
  1084  		announces     newBlockHashesData
  1085  		maxTD         uint64
  1086  		candidateHash *common.Hash
  1087  	)
  1088  	if err := msg.Decode(&announces); err != nil {
  1089  		return errResp(ErrDecode, "%v: %v", msg, err)
  1090  	}
  1091  	// Mark the hashes as present at the remote node
  1092  	// Schedule all the unknown hashes for retrieval
  1093  	for _, block := range announces {
  1094  		p.AddToKnownBlocks(block.Hash)
  1095  
  1096  		if maxTD < block.Number {
  1097  			maxTD = block.Number
  1098  			candidateHash = &block.Hash
  1099  		}
  1100  		if !pm.blockchain.HasBlock(block.Hash, block.Number) {
  1101  			pm.fetcher.Notify(p.GetID(), block.Hash, block.Number, time.Now(), p.FetchBlockHeader, p.FetchBlockBodies)
  1102  		}
  1103  	}
  1104  	blockTD := big.NewInt(int64(maxTD))
  1105  	if _, td := p.Head(); blockTD.Cmp(td) > 0 && candidateHash != nil {
  1106  		p.SetHead(*candidateHash, blockTD)
  1107  	}
  1108  	return nil
  1109  }
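
// Note on the TD handling above: the largest announced block number (maxTD)
// is used directly as the TD candidate, so the peer's recorded head advances
// to the highest-numbered announced block whenever that number beats the
// currently recorded head TD.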
  1110  
   1111  // handleBlockHeaderFetchRequestMsg handles a block header fetch request message.
   1112  // It sends the header that the peer requested.
   1113  // If the peer requests a header which does not exist, an error is returned.
  1114  func handleBlockHeaderFetchRequestMsg(pm *ProtocolManager, p Peer, msg p2p.Msg) error {
  1115  	var hash common.Hash
  1116  	if err := msg.Decode(&hash); err != nil {
  1117  		return errResp(ErrDecode, "%v: %v", msg, err)
  1118  	}
  1119  
  1120  	header := pm.blockchain.GetHeaderByHash(hash)
  1121  	if header == nil {
  1122  		return fmt.Errorf("peer requested header for non-existing hash. peer: %v, hash: %v", p.GetID(), hash)
  1123  	}
  1124  
  1125  	return p.SendFetchedBlockHeader(header)
  1126  }
  1127  
  1128  // handleBlockHeaderFetchResponseMsg handles new block header response message.
  1129  // This message should contain only one header.
  1130  func handleBlockHeaderFetchResponseMsg(pm *ProtocolManager, p Peer, msg p2p.Msg) error {
  1131  	var header *types.Header
  1132  	if err := msg.Decode(&header); err != nil {
  1133  		return errResp(ErrDecode, "msg %v: %v", msg, err)
  1134  	}
  1135  
  1136  	headers := pm.fetcher.FilterHeaders(p.GetID(), []*types.Header{header}, time.Now())
  1137  	if len(headers) != 0 {
  1138  		logger.Debug("Failed to filter header", "peer", p.GetID(),
  1139  			"num", header.Number.Uint64(), "hash", header.Hash(), "len(headers)", len(headers))
  1140  	}
  1141  
  1142  	return nil
  1143  }
  1144  
   1145  // handleBlockBodiesFetchRequestMsg handles a block bodies fetch request message.
   1146  // If the peer requests bodies which do not exist, an error is returned.
  1147  func handleBlockBodiesFetchRequestMsg(pm *ProtocolManager, p Peer, msg p2p.Msg) error {
  1148  	if bodies, err := handleBlockBodiesRequest(pm, p, msg); err != nil {
  1149  		return err
  1150  	} else {
  1151  		return p.SendFetchedBlockBodiesRLP(bodies)
  1152  	}
  1153  }
  1154  
  1155  // handleBlockBodiesFetchResponseMsg handles block bodies fetch response message.
  1156  func handleBlockBodiesFetchResponseMsg(pm *ProtocolManager, p Peer, msg p2p.Msg) error {
  1157  	// A batch of block bodies arrived to one of our previous requests
  1158  	var request blockBodiesData
  1159  	if err := msg.Decode(&request); err != nil {
  1160  		return errResp(ErrDecode, "msg %v: %v", msg, err)
  1161  	}
  1162  	// Deliver them all to the downloader for queuing
  1163  	transactions := make([][]*types.Transaction, len(request))
  1164  
  1165  	for i, body := range request {
  1166  		transactions[i] = body.Transactions
  1167  	}
  1168  
  1169  	transactions = pm.fetcher.FilterBodies(p.GetID(), transactions, time.Now())
  1170  
  1171  	if len(transactions) > 0 {
  1172  		logger.Warn("Failed to filter bodies", "peer", p.GetID(), "lenTxs", len(transactions))
  1173  	}
  1174  	return nil
  1175  }
  1176  
  1177  // handleNewBlockMsg handles new block message.
  1178  func handleNewBlockMsg(pm *ProtocolManager, p Peer, msg p2p.Msg) error {
  1179  	// Retrieve and decode the propagated block
  1180  	var request newBlockData
  1181  	if err := msg.Decode(&request); err != nil {
  1182  		return errResp(ErrDecode, "%v: %v", msg, err)
  1183  	}
  1184  	request.Block.ReceivedAt = msg.ReceivedAt
  1185  	request.Block.ReceivedFrom = p
  1186  
  1187  	// Mark the peer as owning the block and schedule it for import
  1188  	p.AddToKnownBlocks(request.Block.Hash())
  1189  	pm.fetcher.Enqueue(p.GetID(), request.Block)
  1190  
  1191  	// Assuming the block is importable by the peer, but possibly not yet done so,
  1192  	// calculate the head hash and TD that the peer truly must have.
  1193  	var (
  1194  		trueHead = request.Block.ParentHash()
  1195  		trueTD   = new(big.Int).Sub(request.TD, request.Block.BlockScore())
  1196  	)
   1197  	// Update the peer's total blockscore if better than the previous
  1198  	if _, td := p.Head(); trueTD.Cmp(td) > 0 {
  1199  		p.SetHead(trueHead, trueTD)
  1200  
   1201  		// Schedule a sync if above ours. Note, this will not fire a sync for a gap of
   1202  		// a single block (as the true TD is below the propagated block), however this
   1203  		// scenario should easily be covered by the fetcher.
  1204  		currentBlock := pm.blockchain.CurrentBlock()
  1205  		if trueTD.Cmp(pm.blockchain.GetTd(currentBlock.Hash(), currentBlock.NumberU64())) > 0 {
  1206  			go pm.synchronise(p)
  1207  		}
  1208  	}
  1209  	return nil
  1210  }
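
// Worked example of the trueTD computation above: if a propagated block
// announces TD=1000 and its BlockScore is 1, the sender's head (the block's
// parent) must have TD 999, because the announced TD already includes the
// new block's own score.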
  1211  
   1212  // handleTxMsg handles a transaction-propagation message.
  1213  func handleTxMsg(pm *ProtocolManager, p Peer, msg p2p.Msg) error {
  1214  	// Transactions arrived, make sure we have a valid and fresh chain to handle them
  1215  	if atomic.LoadUint32(&pm.acceptTxs) == 0 {
  1216  		return nil
  1217  	}
  1218  	// Transactions can be processed, parse all of them and deliver to the pool
  1219  	var txs types.Transactions
  1220  	if err := msg.Decode(&txs); err != nil {
  1221  		return errResp(ErrDecode, "msg %v: %v", msg, err)
  1222  	}
  1223  	// Only valid txs should be pushed into the pool.
  1224  	validTxs := make(types.Transactions, 0, len(txs))
  1225  	var err error
  1226  	for i, tx := range txs {
  1227  		// Validate and mark the remote transaction
  1228  		if tx == nil {
  1229  			err = errResp(ErrDecode, "transaction %d is nil", i)
  1230  			continue
  1231  		}
  1232  		p.AddToKnownTxs(tx.Hash())
  1233  		validTxs = append(validTxs, tx)
  1234  		txReceiveCounter.Inc(1)
  1235  	}
  1236  	pm.txpool.HandleTxMsg(validTxs)
  1237  	return err
  1238  }
  1239  
   1240  // sampleSize calculates the number of peers to which a block is sent.
   1241  // If there are fewer than minNumPeersToSendBlock peers, it returns the number of peers.
   1242  // Otherwise, it returns sqrt(len(peers)), bounded below by minNumPeersToSendBlock.
  1243  func sampleSize(peers []Peer) int {
  1244  	if len(peers) < minNumPeersToSendBlock {
  1245  		return len(peers)
  1246  	}
  1247  
  1248  	calcSampleSize := int(math.Sqrt(float64(len(peers))))
  1249  	if calcSampleSize > minNumPeersToSendBlock {
  1250  		return calcSampleSize
  1251  	} else {
  1252  		return minNumPeersToSendBlock
  1253  	}
  1254  }
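
// Worked examples for sampleSize: 2 peers -> 2 (fewer than the minimum of 3),
// 9 peers -> 3 (sqrt(9) = 3 is not greater than the minimum, so the minimum
// applies), and 100 peers -> 10 (sqrt(100)).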
  1255  
   1256  // BroadcastBlock will propagate a block to a subset of its peers.
   1257  // If the current node is a CN, it sends the block to all PN peers plus sampled CN peers without the block.
   1258  // However, if there are more than 5 PN peers, it samples 5 of them (blockReceivingPNLimit).
   1259  // If the current node is not a CN, it sends the block to sampled peers excluding CNs.
  1260  func (pm *ProtocolManager) BroadcastBlock(block *types.Block) {
  1261  	if parent := pm.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1); parent == nil {
  1262  		logger.Error("Propagating dangling block", "number", block.Number(), "hash", block.Hash())
  1263  		return
  1264  	}
  1265  	// TODO-Klaytn only send all validators + sub(peer) except subset for this block
  1266  	// transfer := peers[:int(math.Sqrt(float64(len(peers))))]
  1267  
  1268  	// Calculate the TD of the block (it's not imported yet, so block.Td is not valid)
  1269  	td := new(big.Int).Add(block.BlockScore(), pm.blockchain.GetTd(block.ParentHash(), block.NumberU64()-1))
  1270  	peersToSendBlock := pm.peers.SamplePeersToSendBlock(block, pm.nodetype)
  1271  	for _, peer := range peersToSendBlock {
  1272  		peer.AsyncSendNewBlock(block, td)
  1273  	}
  1274  }
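
// For example, a CN with 7 connected PN peers propagates the new block to 5
// of them (blockReceivingPNLimit) plus a sample of CN peers that lack the
// block; the exact selection is implemented by SamplePeersToSendBlock.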
  1275  
  1276  // BroadcastBlockHash will propagate a blockHash to a subset of its peers.
  1277  func (pm *ProtocolManager) BroadcastBlockHash(block *types.Block) {
  1278  	if !pm.blockchain.HasBlock(block.Hash(), block.NumberU64()) {
  1279  		return
  1280  	}
  1281  
   1282  	// Otherwise if the block is indeed in our own chain, announce it
  1283  	peersWithoutBlock := pm.peers.PeersWithoutBlock(block.Hash())
  1284  	for _, peer := range peersWithoutBlock {
  1285  		// peer.SendNewBlockHashes([]common.Hash{hash}, []uint64{block.NumberU64()})
  1286  		peer.AsyncSendNewBlockHash(block)
  1287  	}
  1288  	logger.Trace("Announced block", "hash", block.Hash(),
  1289  		"recipients", len(peersWithoutBlock), "duration", common.PrettyDuration(time.Since(block.ReceivedAt)))
  1290  }
  1291  
   1292  // BroadcastTxs propagates a batch of transactions to the peers which are not
   1293  // known to already have the given transactions.
  1294  func (pm *ProtocolManager) BroadcastTxs(txs types.Transactions) {
   1295  	// The transactions are broadcast per peer in time order, so the whole
   1296  	// batch is sorted once here. This prevents the send path from having to
   1297  	// sort the transactions again for every peer.
  1298  	if !sort.IsSorted(types.TxByTime(txs)) {
  1299  		sort.Sort(types.TxByTime(txs))
  1300  	}
  1301  
  1302  	switch pm.nodetype {
  1303  	case common.CONSENSUSNODE:
  1304  		pm.broadcastTxsFromCN(txs)
  1305  	case common.PROXYNODE:
  1306  		pm.broadcastTxsFromPN(txs)
  1307  	case common.ENDPOINTNODE:
  1308  		pm.broadcastTxsFromEN(txs)
  1309  	default:
  1310  		logger.Error("Unexpected nodeType of ProtocolManager", "nodeType", pm.nodetype)
  1311  	}
  1312  }
  1313  
  1314  func (pm *ProtocolManager) broadcastTxsFromCN(txs types.Transactions) {
  1315  	cnPeersWithoutTxs := make(map[Peer]types.Transactions)
  1316  	for _, tx := range txs {
  1317  		peers := pm.peers.CNWithoutTx(tx.Hash())
  1318  		if len(peers) == 0 {
  1319  			logger.Trace("No peer to broadcast transaction", "hash", tx.Hash(), "recipients", len(peers))
  1320  			continue
  1321  		}
  1322  
  1323  		// TODO-Klaytn Code Check
  1324  		// peers = peers[:int(math.Sqrt(float64(len(peers))))]
  1325  		half := (len(peers) / 2) + 2
  1326  		peers = samplingPeers(peers, half)
  1327  		for _, peer := range peers {
  1328  			cnPeersWithoutTxs[peer] = append(cnPeersWithoutTxs[peer], tx)
  1329  		}
  1330  		logger.Trace("Broadcast transaction", "hash", tx.Hash(), "recipients", len(peers))
  1331  	}
  1332  
  1333  	propTxPeersGauge.Update(int64(len(cnPeersWithoutTxs)))
  1334  	// FIXME include this again: peers = peers[:int(math.Sqrt(float64(len(peers))))]
  1335  	for peer, txs2 := range cnPeersWithoutTxs {
  1336  		// peer.SendTransactions(txs)
  1337  		peer.AsyncSendTransactions(txs2)
  1338  	}
  1339  }
  1340  
  1341  func (pm *ProtocolManager) broadcastTxsFromPN(txs types.Transactions) {
  1342  	cnPeersWithoutTxs := make(map[Peer]types.Transactions)
  1343  	peersWithoutTxs := make(map[Peer]types.Transactions)
  1344  	for _, tx := range txs {
  1345  		// TODO-Klaytn drop or missing tx
  1346  		cnPeers := pm.peers.CNWithoutTx(tx.Hash())
  1347  		if len(cnPeers) > 0 {
  1348  			cnPeers = samplingPeers(cnPeers, 2) // TODO-Klaytn optimize pickSize or propagation way
  1349  			for _, peer := range cnPeers {
  1350  				cnPeersWithoutTxs[peer] = append(cnPeersWithoutTxs[peer], tx)
  1351  			}
  1352  			logger.Trace("Broadcast transaction", "hash", tx.Hash(), "recipients", len(cnPeers))
  1353  		}
  1354  		pm.peers.UpdateTypePeersWithoutTxs(tx, common.PROXYNODE, peersWithoutTxs)
  1355  		txSendCounter.Inc(1)
  1356  	}
  1357  
  1358  	propTxPeersGauge.Update(int64(len(peersWithoutTxs) + len(cnPeersWithoutTxs)))
  1359  	sendTransactions(cnPeersWithoutTxs)
  1360  	sendTransactions(peersWithoutTxs)
  1361  }
  1362  
  1363  func (pm *ProtocolManager) broadcastTxsFromEN(txs types.Transactions) {
  1364  	peersWithoutTxs := make(map[Peer]types.Transactions)
  1365  	for _, tx := range txs {
  1366  		pm.peers.UpdateTypePeersWithoutTxs(tx, common.CONSENSUSNODE, peersWithoutTxs)
  1367  		pm.peers.UpdateTypePeersWithoutTxs(tx, common.PROXYNODE, peersWithoutTxs)
  1368  		pm.peers.UpdateTypePeersWithoutTxs(tx, common.ENDPOINTNODE, peersWithoutTxs)
  1369  		txSendCounter.Inc(1)
  1370  	}
  1371  
  1372  	propTxPeersGauge.Update(int64(len(peersWithoutTxs)))
  1373  	sendTransactions(peersWithoutTxs)
  1374  }
  1375  
   1376  // ReBroadcastTxs sends transactions regardless of whether the peers already have them.
   1377  // Only PNs and ENs rebroadcast transactions to their peers; a CN does not rebroadcast transactions.
  1378  func (pm *ProtocolManager) ReBroadcastTxs(txs types.Transactions) {
  1379  	// A consensus node does not rebroadcast transactions, hence return here.
  1380  	if pm.nodetype == common.CONSENSUSNODE {
  1381  		return
  1382  	}
  1383  
  1384  	// This function calls sendTransactions(), which sends a batch of
  1385  	// transactions to each peer. Sorting the transactions by time once here
  1386  	// avoids having to sort the batch again for every peer.
  1387  	if !sort.IsSorted(types.TxByTime(txs)) {
  1388  		sort.Sort(types.TxByTime(txs))
  1389  	}
  1390  
  1391  	peersWithoutTxs := make(map[Peer]types.Transactions)
  1392  	for _, tx := range txs {
  1393  		peers := pm.peers.SampleResendPeersByType(pm.nodetype)
  1394  		for _, peer := range peers {
  1395  			peersWithoutTxs[peer] = append(peersWithoutTxs[peer], tx)
  1396  		}
  1397  		txResendCounter.Inc(1)
  1398  	}
  1399  
  1400  	propTxPeersGauge.Update(int64(len(peersWithoutTxs)))
  1401  	sendTransactions(peersWithoutTxs)
  1402  }
  1403  
  1404  // sendTransactions iterates over the given map of Peer to Transactions and
  1405  // synchronously sends each peer its paired transactions.
  1406  func sendTransactions(txsSet map[Peer]types.Transactions) {
  1407  	for peer, txs := range txsSet {
  1408  		if err := peer.SendTransactions(txs); err != nil {
  1409  			logger.Error("Failed to send txs", "peer", peer.GetAddr(), "peerType", peer.ConnType(), "numTxs", len(txs), "err", err)
  1410  		}
  1411  	}
  1412  }
  1413  
  1414  func samplingPeers(peers []Peer, pickSize int) []Peer {
  1415  	if len(peers) <= pickSize {
  1416  		return peers
  1417  	}
  1418  
  1419  	// Shuffle with an unbiased Fisher-Yates (rand.Shuffle) seeded at nanosecond
  1420  	// granularity, so repeated calls within the same second differ.
  1421  	picker := rand.New(rand.NewSource(time.Now().UnixNano()))
  1422  	picker.Shuffle(len(peers), func(i, j int) {
  1423  		peers[i], peers[j] = peers[j], peers[i]
  1424  	})
  1425  
  1426  	return peers[:pickSize]
  1427  }
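// Illustrative sketch, not part of the original source: the same
// shuffle-then-truncate sampling pattern on a plain int slice, runnable on
// its own with the math/rand and time imports already present in this file.
func sampleIntsSketch(xs []int, pickSize int) []int {
	if len(xs) <= pickSize {
		return xs
	}
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	r.Shuffle(len(xs), func(i, j int) { xs[i], xs[j] = xs[j], xs[i] })
	return xs[:pickSize]
}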
  1428  
  1429  // minedBroadcastLoop propagates newly mined blocks to the node's peers.
  1430  func (pm *ProtocolManager) minedBroadcastLoop() {
  1431  	// The loop stops automatically once minedBlockSub is unsubscribed and its channel closes.
  1432  	for obj := range pm.minedBlockSub.Chan() {
  1433  		switch ev := obj.Data.(type) {
  1434  		case blockchain.NewMinedBlockEvent:
  1435  			pm.BroadcastBlock(ev.Block)     // First propagate block to peers
  1436  			pm.BroadcastBlockHash(ev.Block) // Only then announce to the rest
  1437  		}
  1438  	}
  1439  }
  1440  
  1441  func (pm *ProtocolManager) txBroadcastLoop() {
  1442  	for {
  1443  		select {
  1444  		case event := <-pm.txsCh:
  1445  			pm.BroadcastTxs(event.Txs)
  1446  			// The Err() channel is closed upon unsubscribing.
  1447  		case <-pm.txsSub.Err():
  1448  			return
  1449  		}
  1450  	}
  1451  }
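// Illustrative sketch, not part of the original source (assumption: the
// subscription is wired roughly like this in the manager's start path, and
// the tx pool exposes a geth-style SubscribeNewTxsEvent):
func startTxBroadcastSketch(pm *ProtocolManager) {
	pm.txsCh = make(chan blockchain.NewTxsEvent, txChanSize)
	pm.txsSub = pm.txpool.SubscribeNewTxsEvent(pm.txsCh)
	go pm.txBroadcastLoop()
}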
  1452  
  1453  func (pm *ProtocolManager) txResendLoop(period uint64, maxTxCount int) {
  1454  	tick := time.Duration(period) * time.Second
  1455  	resend := time.NewTicker(tick)
  1456  	defer resend.Stop()
  1457  
  1458  	logger.Debug("txResendLoop started", "period", tick.Seconds())
  1459  
  1460  	for {
  1461  		select {
  1462  		case <-resend.C:
  1463  			pending := pm.txpool.CachedPendingTxsByCount(maxTxCount)
  1464  			pm.txResend(pending)
  1465  		case <-pm.quitResendCh:
  1466  			logger.Debug("txResendLoop stopped")
  1467  			return
  1468  		}
  1469  	}
  1470  }
  1471  
  1472  func (pm *ProtocolManager) txResend(pending types.Transactions) {
  1473  	txResendRoutineGauge.Update(txResendRoutineGauge.Value() + 1)
  1474  	defer txResendRoutineGauge.Update(txResendRoutineGauge.Value() - 1)
  1475  	// TODO-Klaytn drop or missing tx
  1476  	if len(pending) > 0 {
  1477  		logger.Debug("Tx Resend", "count", len(pending))
  1478  		pm.ReBroadcastTxs(pending)
  1479  	}
  1480  }
  1481  
  1482  // useTxResend reports whether the transaction resend logic should run: only
  1483  // PNs and ENs that do not use the legacy resend path resend transactions;
  1484  // CNs never do.
  1485  func (pm *ProtocolManager) useTxResend() bool {
  1486  	return pm.nodetype != common.CONSENSUSNODE && !pm.txResendUseLegacy
  1487  }
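// Illustrative sketch, not part of the original source (assumption: this
// mirrors how the resend loop is started elsewhere in this package, using
// the DefaultTxResendInterval and DefaultMaxResendTxCount constants above):
func startTxResendSketch(pm *ProtocolManager) {
	if pm.useTxResend() {
		go pm.txResendLoop(DefaultTxResendInterval, DefaultMaxResendTxCount)
	}
}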
  1488  
  1489  // NodeInfo represents a short summary of the Klaytn sub-protocol metadata
  1490  // known about the host peer.
  1491  type NodeInfo struct {
  1492  	// TODO-Klaytn describe predefined network ID below
  1493  	Network    uint64              `json:"network"`    // Klaytn network ID
  1494  	BlockScore *big.Int            `json:"blockscore"` // Total blockscore of the host's blockchain
  1495  	Genesis    common.Hash         `json:"genesis"`    // SHA3 hash of the host's genesis block
  1496  	Config     *params.ChainConfig `json:"config"`     // Chain configuration for the fork rules
  1497  	Head       common.Hash         `json:"head"`       // SHA3 hash of the host's best owned block
  1498  }
  1499  
  1500  // NodeInfo retrieves some protocol metadata about the running host node.
  1501  func (pm *ProtocolManager) NodeInfo() *NodeInfo {
  1502  	currentBlock := pm.blockchain.CurrentBlock()
  1503  	return &NodeInfo{
  1504  		Network:    pm.networkId,
  1505  		BlockScore: pm.blockchain.GetTd(currentBlock.Hash(), currentBlock.NumberU64()),
  1506  		Genesis:    pm.blockchain.Genesis().Hash(),
  1507  		Config:     pm.blockchain.Config(),
  1508  		Head:       currentBlock.Hash(),
  1509  	}
  1510  }
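// Illustrative sketch, not part of the original source: NodeInfo is plain
// data with json tags, so it serializes directly via the encoding/json
// package already imported in this file; an admin/RPC layer could render it
// like this.
func nodeInfoJSONSketch(pm *ProtocolManager) (string, error) {
	b, err := json.MarshalIndent(pm.NodeInfo(), "", "  ")
	if err != nil {
		return "", err
	}
	return string(b), nil
}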
  1511  
  1512  // The functions below are used by the Istanbul BFT consensus.
  1513  // Enqueue wraps the fetcher's Enqueue function to insert the given block.
  1514  func (pm *ProtocolManager) Enqueue(id string, block *types.Block) {
  1515  	pm.fetcher.Enqueue(id, block)
  1516  }
  1517  
  1518  func (pm *ProtocolManager) FindPeers(targets map[common.Address]bool) map[common.Address]consensus.Peer {
  1519  	m := make(map[common.Address]consensus.Peer)
  1520  	for _, p := range pm.peers.Peers() {
  1521  		addr := p.GetAddr()
  1522  		if addr == (common.Address{}) {
  1523  			pubKey, err := p.GetP2PPeerID().Pubkey()
  1524  			if err != nil {
  1525  				continue
  1526  			}
  1527  			addr = crypto.PubkeyToAddress(*pubKey)
  1528  			p.SetAddr(addr)
  1529  		}
  1530  		if targets[addr] {
  1531  			m[addr] = p
  1532  		}
  1533  	}
  1534  	return m
  1535  }
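// Illustrative sketch, not part of the original source: a hypothetical caller
// resolving a validator set to currently connected peers. FindPeers lazily
// derives each peer's address from its public key and caches it via SetAddr.
func findValidatorPeersSketch(pm *ProtocolManager, validators []common.Address) map[common.Address]consensus.Peer {
	targets := make(map[common.Address]bool, len(validators))
	for _, v := range validators {
		targets[v] = true
	}
	return pm.FindPeers(targets)
}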
  1536  
  1537  func (pm *ProtocolManager) GetCNPeers() map[common.Address]consensus.Peer {
  1538  	m := make(map[common.Address]consensus.Peer)
  1539  	for addr, p := range pm.peers.CNPeers() {
  1540  		m[addr] = p
  1541  	}
  1542  	return m
  1543  }
  1544  
  1545  func (pm *ProtocolManager) FindCNPeers(targets map[common.Address]bool) map[common.Address]consensus.Peer {
  1546  	m := make(map[common.Address]consensus.Peer)
  1547  	for addr, p := range pm.peers.CNPeers() {
  1548  		if targets[addr] {
  1549  			m[addr] = p
  1550  		}
  1551  	}
  1552  	return m
  1553  }
  1554  
  1555  func (pm *ProtocolManager) GetENPeers() map[common.Address]consensus.Peer {
  1556  	m := make(map[common.Address]consensus.Peer)
  1557  	for addr, p := range pm.peers.ENPeers() {
  1558  		m[addr] = p
  1559  	}
  1560  	return m
  1561  }
  1562  
  1563  func (pm *ProtocolManager) GetPeers() []common.Address {
  1564  	addrs := make([]common.Address, 0)
  1565  	for _, p := range pm.peers.Peers() {
  1566  		addr := p.GetAddr()
  1567  		if addr == (common.Address{}) {
  1568  			pubKey, err := p.GetP2PPeerID().Pubkey()
  1569  			if err != nil {
  1570  				continue
  1571  			}
  1572  			addr = crypto.PubkeyToAddress(*pubKey)
  1573  			p.SetAddr(addr)
  1574  		}
  1575  		addrs = append(addrs, addr)
  1576  	}
  1577  	return addrs
  1578  }
  1579  
  1580  func (pm *ProtocolManager) Downloader() ProtocolManagerDownloader {
  1581  	return pm.downloader
  1582  }
  1583  
  1584  func (pm *ProtocolManager) SetWsEndPoint(wsep string) {
  1585  	pm.wsendpoint = wsep
  1586  }
  1587  
  1588  func (pm *ProtocolManager) GetSubProtocols() []p2p.Protocol {
  1589  	return pm.SubProtocols
  1590  }
  1591  
  1592  func (pm *ProtocolManager) ProtocolVersion() int {
  1593  	return int(pm.SubProtocols[0].Version)
  1594  }
  1595  
  1596  func (pm *ProtocolManager) SetAcceptTxs() {
  1597  	atomic.StoreUint32(&pm.acceptTxs, 1)
  1598  }
  1599  
  1600  func (pm *ProtocolManager) NodeType() common.ConnType {
  1601  	return pm.nodetype
  1602  }