github.com/klaytn/klaytn@v1.10.2/node/cn/handler.go

     1  // Modifications Copyright 2018 The klaytn Authors
     2  // Copyright 2015 The go-ethereum Authors
     3  // This file is part of go-ethereum.
     4  //
     5  // The go-ethereum library is free software: you can redistribute it and/or modify
     6  // it under the terms of the GNU Lesser General Public License as published by
     7  // the Free Software Foundation, either version 3 of the License, or
     8  // (at your option) any later version.
     9  //
    10  // The go-ethereum library is distributed in the hope that it will be useful,
    11  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    12  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    13  // GNU Lesser General Public License for more details.
    14  //
    15  // You should have received a copy of the GNU Lesser General Public License
    16  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    17  //
    18  // This file is derived from eth/handler.go (2018/06/04).
    19  // Modified and improved for the klaytn development.
    20  
    21  package cn
    22  
    23  import (
    24  	"encoding/json"
    25  	"errors"
    26  	"fmt"
    27  	"math"
    28  	"math/big"
    29  	"math/rand"
    30  	"runtime/debug"
    31  	"sort"
    32  	"sync"
    33  	"sync/atomic"
    34  	"time"
    35  
    36  	"github.com/klaytn/klaytn/accounts"
    37  	"github.com/klaytn/klaytn/blockchain"
    38  	"github.com/klaytn/klaytn/blockchain/types"
    39  	"github.com/klaytn/klaytn/common"
    40  	"github.com/klaytn/klaytn/consensus"
    41  	"github.com/klaytn/klaytn/consensus/istanbul"
    42  	"github.com/klaytn/klaytn/crypto"
    43  	"github.com/klaytn/klaytn/datasync/downloader"
    44  	"github.com/klaytn/klaytn/datasync/fetcher"
    45  	"github.com/klaytn/klaytn/event"
    46  	"github.com/klaytn/klaytn/networks/p2p"
    47  	"github.com/klaytn/klaytn/networks/p2p/discover"
    48  	"github.com/klaytn/klaytn/node/cn/snap"
    49  	"github.com/klaytn/klaytn/params"
    50  	"github.com/klaytn/klaytn/reward"
    51  	"github.com/klaytn/klaytn/rlp"
    52  	"github.com/klaytn/klaytn/storage/database"
    53  	"github.com/klaytn/klaytn/storage/statedb"
    54  	"github.com/klaytn/klaytn/work"
    55  )
    56  
    57  const (
    58  	softResponseLimit = 2 * 1024 * 1024 // Target maximum size of returned blocks, headers or node data.
    59  	estHeaderRlpSize  = 500             // Approximate size of an RLP encoded block header
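	// Illustrative note (added): with the two values above, a single headers
	// response is bounded by roughly softResponseLimit/estHeaderRlpSize ≈ 4,194
	// headers, though the loop in handleBlockHeadersRequestMsg also caps it at
	// downloader.MaxHeaderFetch.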
    60  
     61  	// txChanSize is the size of the channel listening to NewTxsEvent.
     62  	// The number is taken from the size of the tx pool.
    63  	txChanSize = 4096
    64  
    65  	concurrentPerPeer  = 3
    66  	channelSizePerPeer = 20
    67  
     68  	blockReceivingPNLimit  = 5 // maximum number of PNs to which a CN broadcasts a block.
     69  	minNumPeersToSendBlock = 3 // minimum number of peers to which a node broadcasts a block.
    70  
     71  	// DefaultMaxResendTxCount is the maximum number of transactions resent to a peer, to keep pending txs from going missing.
    72  	DefaultMaxResendTxCount = 1000
    73  
     74  	// DefaultTxResendInterval is the interval, in seconds, at which transactions are resent.
    75  	DefaultTxResendInterval = 4
    76  
     77  	// ExtraNonSnapPeers is the number of extra non-snap peers allowed to connect beyond the number of snap peers.
    78  	ExtraNonSnapPeers = 5
    79  )
    80  
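// nextResendBatch is a minimal illustrative sketch added for exposition; it is
// not part of the original handler. It shows how a resend pass bounded by
// DefaultMaxResendTxCount might trim the pending set before handing it to a
// rebroadcast such as ReBroadcastTxs.
func nextResendBatch(pending types.Transactions) types.Transactions {
	if len(pending) > DefaultMaxResendTxCount {
		// Keep only the first DefaultMaxResendTxCount transactions.
		return pending[:DefaultMaxResendTxCount]
	}
	return pending
}
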
    81  // errIncompatibleConfig is returned if the requested protocols and configs are
    82  // not compatible (low protocol version restrictions and high requirements).
    83  var errIncompatibleConfig = errors.New("incompatible configuration")
    84  
    85  var (
    86  	errUnknownProcessingError  = errors.New("unknown error during the msg processing")
    87  	errUnsupportedEnginePolicy = errors.New("unsupported engine or policy")
    88  )
    89  
    90  func errResp(code errCode, format string, v ...interface{}) error {
    91  	return fmt.Errorf("%v - %v", code, fmt.Sprintf(format, v...))
    92  }
    93  
    94  type ProtocolManager struct {
    95  	networkId uint64
    96  
    97  	fastSync  uint32 // Flag whether fast sync is enabled (gets disabled if we already have blocks)
    98  	snapSync  uint32 // Flag whether fast sync should operate on top of the snap protocol
    99  	acceptTxs uint32 // Flag whether we're considered synchronised (enables transaction processing)
   100  
   101  	txpool      work.TxPool
   102  	blockchain  work.BlockChain
   103  	chainconfig *params.ChainConfig
   104  	maxPeers    int
   105  
   106  	downloader ProtocolManagerDownloader
   107  	fetcher    ProtocolManagerFetcher
   108  	peers      PeerSet
   109  
   110  	SubProtocols []p2p.Protocol
   111  
   112  	eventMux      *event.TypeMux
   113  	txsCh         chan blockchain.NewTxsEvent
   114  	txsSub        event.Subscription
   115  	minedBlockSub *event.TypeMuxSubscription
   116  
   117  	// channels for fetcher, syncer, txsyncLoop
   118  	newPeerCh   chan Peer
   119  	txsyncCh    chan *txsync
   120  	quitSync    chan struct{}
   121  	noMorePeers chan struct{}
   122  
   123  	quitResendCh chan struct{}
   124  	// wait group is used for graceful shutdowns during downloading
   125  	// and processing
   126  	wg     sync.WaitGroup
   127  	peerWg sync.WaitGroup
   128  	// istanbul BFT
   129  	engine consensus.Engine
   130  
   131  	rewardbase   common.Address
   132  	rewardwallet accounts.Wallet
   133  
   134  	wsendpoint string
   135  
   136  	nodetype          common.ConnType
   137  	txResendUseLegacy bool
   138  
   139  	// syncStop is a flag to stop peer sync
   140  	syncStop int32
   141  }
   142  
    143  // NewProtocolManager returns a new Klaytn sub protocol manager. The Klaytn sub protocol manages peers
    144  // capable of operating on the Klaytn network.
   145  func NewProtocolManager(config *params.ChainConfig, mode downloader.SyncMode, networkId uint64, mux *event.TypeMux,
   146  	txpool work.TxPool, engine consensus.Engine, blockchain work.BlockChain, chainDB database.DBManager, cacheLimit int,
   147  	nodetype common.ConnType, cnconfig *Config,
   148  ) (*ProtocolManager, error) {
    149  	// Create the protocol manager with the base fields
   150  	manager := &ProtocolManager{
   151  		networkId:         networkId,
   152  		eventMux:          mux,
   153  		txpool:            txpool,
   154  		blockchain:        blockchain,
   155  		chainconfig:       config,
   156  		peers:             newPeerSet(),
   157  		newPeerCh:         make(chan Peer),
   158  		noMorePeers:       make(chan struct{}),
   159  		txsyncCh:          make(chan *txsync),
   160  		quitSync:          make(chan struct{}),
   161  		quitResendCh:      make(chan struct{}),
   162  		engine:            engine,
   163  		nodetype:          nodetype,
   164  		txResendUseLegacy: cnconfig.TxResendUseLegacy,
   165  	}
   166  
   167  	// istanbul BFT
   168  	if handler, ok := engine.(consensus.Handler); ok {
   169  		handler.SetBroadcaster(manager, manager.nodetype)
   170  	}
   171  
   172  	// Figure out whether to allow fast sync or not
   173  	if (mode == downloader.FastSync || mode == downloader.SnapSync) && blockchain.CurrentBlock().NumberU64() > 0 {
   174  		logger.Error("Blockchain not empty, fast sync disabled")
   175  		mode = downloader.FullSync
   176  	}
   177  	if mode == downloader.FastSync {
   178  		manager.fastSync = uint32(1)
   179  		manager.snapSync = uint32(0)
   180  	}
   181  	if mode == downloader.SnapSync {
   182  		manager.fastSync = uint32(0)
   183  		manager.snapSync = uint32(1)
   184  	}
   185  	// istanbul BFT
   186  	protocol := engine.Protocol()
   187  	// Initiate a sub-protocol for every implemented version we can handle
   188  	manager.SubProtocols = make([]p2p.Protocol, 0, len(protocol.Versions))
   189  	for i, version := range protocol.Versions {
   190  		// Skip protocol version if incompatible with the mode of operation
   191  		if mode == downloader.FastSync && version < klay63 {
   192  			continue
   193  		}
   194  		// TODO-Klaytn-Snapsync add snapsync and version check here
   195  		if mode == downloader.SnapSync && version < klay65 {
   196  			continue
   197  		}
   198  		// Compatible; initialise the sub-protocol
   199  		version := version
   200  		manager.SubProtocols = append(manager.SubProtocols, p2p.Protocol{
   201  			Name:    protocol.Name,
   202  			Version: version,
   203  			Length:  protocol.Lengths[i],
   204  			Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
   205  				peer := manager.newPeer(int(version), p, rw)
   206  				pubKey, err := p.ID().Pubkey()
   207  				if err != nil {
   208  					if p.ConnType() == common.CONSENSUSNODE {
   209  						return err
   210  					}
   211  					peer.SetAddr(common.Address{})
   212  				} else {
   213  					addr := crypto.PubkeyToAddress(*pubKey)
   214  					peer.SetAddr(addr)
   215  				}
   216  				select {
   217  				case manager.newPeerCh <- peer:
   218  					manager.wg.Add(1)
   219  					defer manager.wg.Done()
   220  					return manager.handle(peer)
   221  				case <-manager.quitSync:
   222  					return p2p.DiscQuitting
   223  				}
   224  			},
   225  			RunWithRWs: func(p *p2p.Peer, rws []p2p.MsgReadWriter) error {
   226  				peer, err := manager.newPeerWithRWs(int(version), p, rws)
   227  				if err != nil {
   228  					return err
   229  				}
   230  				pubKey, err := p.ID().Pubkey()
   231  				if err != nil {
   232  					if p.ConnType() == common.CONSENSUSNODE {
   233  						return err
   234  					}
   235  					peer.SetAddr(common.Address{})
   236  				} else {
   237  					addr := crypto.PubkeyToAddress(*pubKey)
   238  					peer.SetAddr(addr)
   239  				}
   240  				select {
   241  				case manager.newPeerCh <- peer:
   242  					manager.wg.Add(1)
   243  					defer manager.wg.Done()
   244  					return peer.Handle(manager)
   245  				case <-manager.quitSync:
   246  					return p2p.DiscQuitting
   247  				}
   248  			},
   249  			NodeInfo: func() interface{} {
   250  				return manager.NodeInfo()
   251  			},
   252  			PeerInfo: func(id discover.NodeID) interface{} {
   253  				if p := manager.peers.Peer(fmt.Sprintf("%x", id[:8])); p != nil {
   254  					return p.Info()
   255  				}
   256  				return nil
   257  			},
   258  		})
   259  
   260  		if cnconfig.SnapshotCacheSize > 0 {
   261  			for _, version := range snap.ProtocolVersions {
   262  				manager.SubProtocols = append(manager.SubProtocols, p2p.Protocol{
   263  					Name:    snap.ProtocolName,
   264  					Version: version,
   265  					Length:  snap.ProtocolLengths[version],
   266  					Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
   267  						manager.wg.Add(1)
   268  						defer manager.wg.Done()
   269  						peer := snap.NewPeer(version, p, rw)
   270  						return manager.handleSnapPeer(peer)
   271  					},
   272  					RunWithRWs: func(p *p2p.Peer, rws []p2p.MsgReadWriter) error {
   273  						manager.wg.Add(1)
   274  						defer manager.wg.Done()
   275  						peer := snap.NewPeer(version, p, rws[p2p.ConnDefault])
   276  						return manager.handleSnapPeer(peer)
   277  					},
   278  					NodeInfo: func() interface{} {
   279  						return manager.NodeInfo()
   280  					},
   281  					PeerInfo: func(id discover.NodeID) interface{} {
   282  						if p := manager.peers.Peer(fmt.Sprintf("%x", id[:8])); p != nil {
   283  							return p.Info()
   284  						}
   285  						return nil
   286  					},
   287  				})
   288  			}
   289  		}
   290  	}
   291  
   292  	if len(manager.SubProtocols) == 0 {
   293  		return nil, errIncompatibleConfig
   294  	}
   295  
   296  	// Create and set downloader
   297  	if cnconfig.DownloaderDisable {
   298  		manager.downloader = downloader.NewFakeDownloader()
   299  	} else {
    300  		// Construct the downloader (long sync) and its backing state bloom if fast
    301  		// sync is requested. The downloader is responsible for deallocating the state
    302  		// bloom when it's done.
    303  		// Note: we don't enable it if snap-sync is performed, since it's very heavy
    304  		// and the heal portion of the snap sync is much lighter than fast sync. What we
    305  		// particularly want to avoid is a 90%-finished (but restarted) snap-sync beginning
    306  		// to index the entire trie.
   308  		var stateBloom *statedb.SyncBloom
   309  		if atomic.LoadUint32(&manager.fastSync) == 1 && atomic.LoadUint32(&manager.snapSync) == 0 {
   310  			stateBloom = statedb.NewSyncBloom(uint64(cacheLimit), chainDB.GetStateTrieDB())
   311  		}
   312  		var proposerPolicy uint64
   313  		if config.Istanbul != nil {
   314  			proposerPolicy = config.Istanbul.ProposerPolicy
   315  		}
   316  		manager.downloader = downloader.New(mode, chainDB, stateBloom, manager.eventMux, blockchain, nil, manager.removePeer, proposerPolicy)
   317  	}
   318  
   319  	// Create and set fetcher
   320  	if cnconfig.FetcherDisable {
   321  		manager.fetcher = fetcher.NewFakeFetcher()
   322  	} else {
   323  		validator := func(header *types.Header) error {
   324  			return engine.VerifyHeader(blockchain, header, true)
   325  		}
   326  		heighter := func() uint64 {
   327  			return blockchain.CurrentBlock().NumberU64()
   328  		}
   329  		inserter := func(blocks types.Blocks) (int, error) {
   330  			// If fast sync is running, deny importing weird blocks
   331  			if atomic.LoadUint32(&manager.fastSync) == 1 || atomic.LoadUint32(&manager.snapSync) == 1 {
   332  				logger.Warn("Discarded bad propagated block", "number", blocks[0].Number(), "hash", blocks[0].Hash())
   333  				return 0, nil
   334  			}
   335  			atomic.StoreUint32(&manager.acceptTxs, 1) // Mark initial sync done on any fetcher import
   336  			return manager.blockchain.InsertChain(blocks)
   337  		}
   338  		manager.fetcher = fetcher.New(blockchain.GetBlockByHash, validator, manager.BroadcastBlock, manager.BroadcastBlockHash, heighter, inserter, manager.removePeer)
   339  	}
   340  
   341  	if manager.useTxResend() {
   342  		go manager.txResendLoop(cnconfig.TxResendInterval, cnconfig.TxResendCount)
   343  	}
   344  	return manager, nil
   345  }
   346  
   347  // istanbul BFT
   348  func (pm *ProtocolManager) RegisterValidator(connType common.ConnType, validator p2p.PeerTypeValidator) {
   349  	pm.peers.RegisterValidator(connType, validator)
   350  }
   351  
   352  func (pm *ProtocolManager) getWSEndPoint() string {
   353  	return pm.wsendpoint
   354  }
   355  
   356  func (pm *ProtocolManager) SetRewardbase(addr common.Address) {
   357  	pm.rewardbase = addr
   358  }
   359  
   360  func (pm *ProtocolManager) SetRewardbaseWallet(wallet accounts.Wallet) {
   361  	pm.rewardwallet = wallet
   362  }
   363  
   364  func (pm *ProtocolManager) removePeer(id string) {
   365  	// Short circuit if the peer was already removed
   366  	peer := pm.peers.Peer(id)
   367  	if peer == nil {
   368  		return
   369  	}
   370  	logger.Debug("Removing Klaytn peer", "peer", id)
   371  	if peer.ExistSnapExtension() {
   372  		pm.downloader.GetSnapSyncer().Unregister(id)
   373  	}
   374  
   375  	// Unregister the peer from the downloader and peer set
   376  	pm.downloader.UnregisterPeer(id)
   377  	if err := pm.peers.Unregister(id); err != nil {
   378  		logger.Error("Peer removal failed", "peer", id, "err", err)
   379  	}
   380  	// Hard disconnect at the networking layer
    381  	peer.GetP2PPeer().Disconnect(p2p.DiscUselessPeer)
   384  }
   385  
   386  // getChainID returns the current chain id.
   387  func (pm *ProtocolManager) getChainID() *big.Int {
   388  	return pm.blockchain.Config().ChainID
   389  }
   390  
   391  func (pm *ProtocolManager) Start(maxPeers int) {
   392  	pm.maxPeers = maxPeers
   393  
   394  	// broadcast transactions
   395  	pm.txsCh = make(chan blockchain.NewTxsEvent, txChanSize)
   396  	pm.txsSub = pm.txpool.SubscribeNewTxsEvent(pm.txsCh)
   397  	go pm.txBroadcastLoop()
   398  
   399  	// broadcast mined blocks
   400  	pm.minedBlockSub = pm.eventMux.Subscribe(blockchain.NewMinedBlockEvent{})
   401  	go pm.minedBroadcastLoop()
   402  
   403  	// start sync handlers
   404  	go pm.syncer()
   405  	go pm.txsyncLoop()
   406  }
   407  
   408  func (pm *ProtocolManager) Stop() {
   409  	logger.Info("Stopping Klaytn protocol")
   410  
   411  	pm.txsSub.Unsubscribe()        // quits txBroadcastLoop
   412  	pm.minedBlockSub.Unsubscribe() // quits blockBroadcastLoop
   413  
   414  	// Quit the sync loop.
   415  	// After this send has completed, no new peers will be accepted.
   416  	pm.noMorePeers <- struct{}{}
   417  
   418  	if pm.useTxResend() {
   419  		// Quit resend loop
   420  		pm.quitResendCh <- struct{}{}
   421  	}
   422  
   423  	// Quit fetcher, txsyncLoop.
   424  	close(pm.quitSync)
   425  
   426  	// Disconnect existing sessions.
   427  	// This also closes the gate for any new registrations on the peer set.
   428  	// sessions which are already established but not added to pm.peers yet
   429  	// will exit when they try to register.
   430  	pm.peers.Close()
   431  
   432  	// Wait for all peer handler goroutines and the loops to come down.
   433  	pm.wg.Wait()
   434  
   435  	logger.Info("Klaytn protocol stopped")
   436  }
   437  
    438  // SetSyncStop sets the value of the syncStop flag. If it is true, the peer sync process does not proceed.
    439  func (pm *ProtocolManager) SetSyncStop(flag bool) {
    440  	var i int32
    441  	if flag {
    442  		i = 1
    443  	}
    444  	atomic.StoreInt32(&pm.syncStop, i)
    445  }
   446  
    447  func (pm *ProtocolManager) GetSyncStop() bool {
    448  	return atomic.LoadInt32(&pm.syncStop) != 0
    449  }
   453  
   454  func (pm *ProtocolManager) newPeer(pv int, p *p2p.Peer, rw p2p.MsgReadWriter) Peer {
   455  	return newPeer(pv, p, newMeteredMsgWriter(rw))
   456  }
   457  
   458  // newPeerWithRWs creates a new Peer object with a slice of p2p.MsgReadWriter.
   459  func (pm *ProtocolManager) newPeerWithRWs(pv int, p *p2p.Peer, rws []p2p.MsgReadWriter) (Peer, error) {
   460  	meteredRWs := make([]p2p.MsgReadWriter, 0, len(rws))
   461  	for _, rw := range rws {
   462  		meteredRWs = append(meteredRWs, newMeteredMsgWriter(rw))
   463  	}
   464  	return newPeerWithRWs(pv, p, meteredRWs)
   465  }
   466  
   467  func (pm *ProtocolManager) handleSnapPeer(peer *snap.Peer) error {
   468  	pm.peerWg.Add(1)
   469  	defer pm.peerWg.Done()
   470  
   471  	if err := pm.peers.RegisterSnapExtension(peer); err != nil {
   472  		peer.Log().Warn("Snapshot extension registration failed", "err", err)
   473  		return err
   474  	}
   475  
   476  	return snap.Handle(pm.blockchain, pm.downloader, peer)
   477  }
   478  
   479  // handle is the callback invoked to manage the life cycle of a Klaytn peer. When
   480  // this function terminates, the peer is disconnected.
   481  func (pm *ProtocolManager) handle(p Peer) error {
   482  	// If the peer has a `snap` extension, wait for it to connect so we can have
   483  	// a uniform initialization/teardown mechanism
   484  	snap, err := pm.peers.WaitSnapExtension(p)
   485  	if err != nil {
   486  		p.GetP2PPeer().Log().Error("Snapshot extension barrier failed", "err", err)
   487  		return err
   488  	}
   489  
   490  	// Ignore maxPeers if this is a trusted peer
   491  	if pm.peers.Len() >= pm.maxPeers && !p.GetP2PPeer().Info().Networks[p2p.ConnDefault].Trusted {
   492  		return p2p.DiscTooManyPeers
   493  	}
   494  	p.GetP2PPeer().Log().Debug("Klaytn peer connected", "name", p.GetP2PPeer().Name())
   495  
   496  	pm.peerWg.Add(1)
   497  	defer pm.peerWg.Done()
   498  
   499  	// Execute the handshake
   500  	var (
   501  		genesis = pm.blockchain.Genesis()
   502  		head    = pm.blockchain.CurrentHeader()
   503  		hash    = head.Hash()
   504  		number  = head.Number.Uint64()
   505  		td      = pm.blockchain.GetTd(hash, number)
   506  	)
   507  
   508  	if err := p.Handshake(pm.networkId, pm.getChainID(), td, hash, genesis.Hash()); err != nil {
   509  		p.GetP2PPeer().Log().Debug("Klaytn peer handshake failed", "err", err)
   510  		return err
   511  	}
   512  	reject := false
   513  	if atomic.LoadUint32(&pm.snapSync) == 1 {
   514  		if snap == nil {
   515  			// If we are running snap-sync, we want to reserve roughly half the peer
   516  			// slots for peers supporting the snap protocol.
    517  			// The logic here is: we only allow up to ExtraNonSnapPeers more non-snap peers than snap peers.
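			// Worked example (added): with ExtraNonSnapPeers = 5 and 20 connected
			// peers of which 7 support snap, the non-snap count 13 exceeds 7+5 = 12,
			// so this non-snap peer is rejected.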
   518  			if all, snp := pm.peers.Len(), pm.peers.SnapLen(); all-snp > snp+ExtraNonSnapPeers {
   519  				reject = true
   520  			}
   521  		}
   522  	}
   523  	// Ignore maxPeers if this is a trusted peer
    524  	if !p.GetP2PPeer().Info().Networks[p2p.ConnDefault].Trusted {
   525  		if reject || pm.peers.Len() >= pm.maxPeers {
   526  			return p2p.DiscTooManyPeers
   527  		}
   528  	}
   529  
   530  	if rw, ok := p.GetRW().(*meteredMsgReadWriter); ok {
   531  		rw.Init(p.GetVersion())
   532  	}
   533  
   534  	// Register the peer locally
   535  	if err := pm.peers.Register(p, snap); err != nil {
    536  		// if the node was started with an account to unlock, peers cannot be registered until the unlock finishes
   537  		p.GetP2PPeer().Log().Info("Klaytn peer registration failed", "err", err)
   538  		return err
   539  	}
   540  	defer pm.removePeer(p.GetID())
   541  
   542  	// Register the peer in the downloader. If the downloader considers it banned, we disconnect
   543  	if err := pm.downloader.RegisterPeer(p.GetID(), p.GetVersion(), p); err != nil {
   544  		return err
   545  	}
   546  	if snap != nil {
   547  		if err := pm.downloader.GetSnapSyncer().Register(snap); err != nil {
   548  			p.GetP2PPeer().Log().Info("Failed to register peer in snap syncer", "err", err)
   549  			return err
   550  		}
   551  	}
   552  
    553  	// Propagate existing transactions. New transactions appearing
    554  	// after this will be sent via broadcasts.
   555  	pm.syncTransactions(p)
   556  
   557  	p.GetP2PPeer().Log().Info("Added a single channel P2P Peer", "peerID", p.GetP2PPeerID())
   558  
   559  	pubKey, err := p.GetP2PPeerID().Pubkey()
   560  	if err != nil {
   561  		return err
   562  	}
   563  	addr := crypto.PubkeyToAddress(*pubKey)
   564  
   565  	// TODO-Klaytn check global worker and peer worker
   566  	messageChannel := make(chan p2p.Msg, channelSizePerPeer)
   567  	defer close(messageChannel)
   568  	errChannel := make(chan error, channelSizePerPeer)
   569  	for w := 1; w <= concurrentPerPeer; w++ {
   570  		go pm.processMsg(messageChannel, p, addr, errChannel)
   571  	}
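	// The read loop below fans messages out to the concurrentPerPeer workers
	// above; the deferred close of messageChannel terminates those workers.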
   572  
   573  	// main loop. handle incoming messages.
   574  	for {
   575  		msg, err := p.GetRW().ReadMsg()
   576  		if err != nil {
   577  			p.GetP2PPeer().Log().Warn("ProtocolManager failed to read msg", "err", err)
   578  			return err
   579  		}
   580  		if msg.Size > ProtocolMaxMsgSize {
   581  			err := errResp(ErrMsgTooLarge, "%v > %v", msg.Size, ProtocolMaxMsgSize)
   582  			p.GetP2PPeer().Log().Warn("ProtocolManager over max msg size", "err", err)
   583  			return err
   584  		}
   585  
   586  		select {
   587  		case err := <-errChannel:
   588  			return err
   589  		case messageChannel <- msg:
   590  		}
   591  		// go pm.handleMsg(p, addr, msg)
   592  
   593  		//if err := pm.handleMsg(p); err != nil {
   594  		//	p.Log().Debug("Klaytn message handling failed", "err", err)
   595  		//	return err
   596  		//}
   597  	}
   598  }
   599  
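// processMsg runs on one of the concurrentPerPeer worker goroutines started in
// handle. It drains msgCh and dispatches each message to handleMsg (or discards
// messages when the fetcher/downloader are fakes), reporting the first fatal
// error on errCh.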
   600  func (pm *ProtocolManager) processMsg(msgCh <-chan p2p.Msg, p Peer, addr common.Address, errCh chan<- error) {
   601  	defer func() {
   602  		if err := recover(); err != nil {
   603  			logger.Error("stacktrace from panic: \n" + string(debug.Stack()))
   604  			logger.Warn("the panic is recovered", "panicErr", err)
   605  			errCh <- errUnknownProcessingError
   606  		}
   607  	}()
   608  
   609  	_, fakeF := pm.fetcher.(*fetcher.FakeFetcher)
   610  	_, fakeD := pm.downloader.(*downloader.FakeDownloader)
   611  	if fakeD || fakeF {
   612  		p.GetP2PPeer().Log().Warn("ProtocolManager does not handle p2p messages", "fakeFetcher", fakeF, "fakeDownloader", fakeD)
   613  		for msg := range msgCh {
   614  			msg.Discard()
   615  		}
   616  	} else {
   617  		for msg := range msgCh {
   618  			if err := pm.handleMsg(p, addr, msg); err != nil {
   619  				p.GetP2PPeer().Log().Error("ProtocolManager failed to handle message", "msg", msg, "err", err)
   620  				errCh <- err
   621  				return
   622  			}
   623  			msg.Discard()
   624  		}
   625  	}
   626  
   627  	p.GetP2PPeer().Log().Debug("ProtocolManager.processMsg closed", "PeerName", p.GetP2PPeer().Name())
   628  }
   629  
   630  // processConsensusMsg processes the consensus message.
   631  func (pm *ProtocolManager) processConsensusMsg(msgCh <-chan p2p.Msg, p Peer, addr common.Address, errCh chan<- error) {
   632  	for msg := range msgCh {
   633  		if handler, ok := pm.engine.(consensus.Handler); ok {
   634  			_, err := handler.HandleMsg(addr, msg)
    635  			// If msg is an istanbul message, handled is true; err is nil when the message was handled successfully.
   636  			if err != nil {
   637  				p.GetP2PPeer().Log().Warn("ProtocolManager failed to handle consensus message. This can happen during block synchronization.", "msg", msg, "err", err)
   638  				errCh <- err
   639  				return
   640  			}
   641  		}
   642  		msg.Discard()
   643  	}
   644  	p.GetP2PPeer().Log().Info("ProtocolManager.processConsensusMsg closed", "PeerName", p.GetP2PPeer().Name())
   645  }
   646  
   647  // handleMsg is invoked whenever an inbound message is received from a remote
   648  // peer. The remote connection is torn down upon returning any error.
   649  func (pm *ProtocolManager) handleMsg(p Peer, addr common.Address, msg p2p.Msg) error {
   650  	// Below message size checking is done by handle().
   651  	// Read the next message from the remote peer, and ensure it's fully consumed
   652  	//msg, err := p.rw.ReadMsg()
   653  	//if err != nil {
   654  	//	return err
   655  	//}
   656  	//if msg.Size > ProtocolMaxMsgSize {
   657  	//	return errResp(ErrMsgTooLarge, "%v > %v", msg.Size, ProtocolMaxMsgSize)
   658  	//}
   659  	//defer msg.Discard()
   660  
   661  	// istanbul BFT
   662  	if handler, ok := pm.engine.(consensus.Handler); ok {
   663  		//pubKey, err := p.ID().Pubkey()
   664  		//if err != nil {
   665  		//	return err
   666  		//}
   667  		//addr := crypto.PubkeyToAddress(*pubKey)
   668  		handled, err := handler.HandleMsg(addr, msg)
    669  		// If msg is an istanbul message, handled is true; err is nil when the message was handled successfully.
   670  		if handled {
   671  			return err
   672  		}
   673  	}
   674  
   675  	// Handle the message depending on its contents
   676  	switch {
   677  	case msg.Code == StatusMsg:
   678  		// Status messages should never arrive after the handshake
   679  		return errResp(ErrExtraStatusMsg, "uncontrolled status message")
   680  
   681  		// Block header query, collect the requested headers and reply
   682  	case msg.Code == BlockHeadersRequestMsg:
   683  		if err := handleBlockHeadersRequestMsg(pm, p, msg); err != nil {
   684  			return err
   685  		}
   686  
   687  	case msg.Code == BlockHeadersMsg:
   688  		if err := handleBlockHeadersMsg(pm, p, msg); err != nil {
   689  			return err
   690  		}
   691  
   692  	case msg.Code == BlockBodiesRequestMsg:
   693  		if err := handleBlockBodiesRequestMsg(pm, p, msg); err != nil {
   694  			return err
   695  		}
   696  
   697  	case msg.Code == BlockBodiesMsg:
   698  		if err := handleBlockBodiesMsg(pm, p, msg); err != nil {
   699  			return err
   700  		}
   701  
   702  	case p.GetVersion() >= klay63 && msg.Code == NodeDataRequestMsg:
   703  		if err := handleNodeDataRequestMsg(pm, p, msg); err != nil {
   704  			return err
   705  		}
   706  
   707  	case p.GetVersion() >= klay63 && msg.Code == NodeDataMsg:
   708  		if err := handleNodeDataMsg(pm, p, msg); err != nil {
   709  			return err
   710  		}
   711  
   712  	case p.GetVersion() >= klay63 && msg.Code == ReceiptsRequestMsg:
   713  		if err := handleReceiptsRequestMsg(pm, p, msg); err != nil {
   714  			return err
   715  		}
   716  
   717  	case p.GetVersion() >= klay63 && msg.Code == ReceiptsMsg:
   718  		if err := handleReceiptsMsg(pm, p, msg); err != nil {
   719  			return err
   720  		}
   721  
   722  	case p.GetVersion() >= klay65 && msg.Code == StakingInfoRequestMsg:
   723  		if err := handleStakingInfoRequestMsg(pm, p, msg); err != nil {
   724  			return err
   725  		}
   726  
   727  	case p.GetVersion() >= klay65 && msg.Code == StakingInfoMsg:
   728  		if err := handleStakingInfoMsg(pm, p, msg); err != nil {
   729  			return err
   730  		}
   731  
   732  	case msg.Code == NewBlockHashesMsg:
   733  		if err := handleNewBlockHashesMsg(pm, p, msg); err != nil {
   734  			return err
   735  		}
   736  
   737  	case msg.Code == BlockHeaderFetchRequestMsg:
   738  		if err := handleBlockHeaderFetchRequestMsg(pm, p, msg); err != nil {
   739  			return err
   740  		}
   741  
   742  	case msg.Code == BlockHeaderFetchResponseMsg:
   743  		if err := handleBlockHeaderFetchResponseMsg(pm, p, msg); err != nil {
   744  			return err
   745  		}
   746  
   747  	case msg.Code == BlockBodiesFetchRequestMsg:
   748  		if err := handleBlockBodiesFetchRequestMsg(pm, p, msg); err != nil {
   749  			return err
   750  		}
   751  
   752  	case msg.Code == BlockBodiesFetchResponseMsg:
   753  		if err := handleBlockBodiesFetchResponseMsg(pm, p, msg); err != nil {
   754  			return err
   755  		}
   756  
   757  	case msg.Code == NewBlockMsg:
   758  		if err := handleNewBlockMsg(pm, p, msg); err != nil {
   759  			return err
   760  		}
   761  
   762  	case msg.Code == TxMsg:
   763  		if err := handleTxMsg(pm, p, msg); err != nil {
   764  			return err
   765  		}
   766  
   767  	default:
   768  		return errResp(ErrInvalidMsgCode, "%v", msg.Code)
   769  	}
   770  	return nil
   771  }
   772  
   773  // handleBlockHeadersRequestMsg handles block header request message.
   774  func handleBlockHeadersRequestMsg(pm *ProtocolManager, p Peer, msg p2p.Msg) error {
   775  	// Decode the complex header query
   776  	var query getBlockHeadersData
   777  	if err := msg.Decode(&query); err != nil {
   778  		return errResp(ErrDecode, "%v: %v", msg, err)
   779  	}
   780  	hashMode := query.Origin.Hash != (common.Hash{})
   781  
    782  	// Gather headers until the fetch or network limits are reached
   783  	var (
   784  		bytes   common.StorageSize
   785  		headers []*types.Header
   786  		unknown bool
   787  	)
   788  	for !unknown && len(headers) < int(query.Amount) && bytes < softResponseLimit && len(headers) < downloader.MaxHeaderFetch {
   789  		// Retrieve the next header satisfying the query
   790  		var origin *types.Header
   791  		if hashMode {
   792  			origin = pm.blockchain.GetHeaderByHash(query.Origin.Hash)
   793  		} else {
   794  			origin = pm.blockchain.GetHeaderByNumber(query.Origin.Number)
   795  		}
   796  		if origin == nil {
   797  			break
   798  		}
   799  		number := origin.Number.Uint64()
   800  		headers = append(headers, origin)
   801  		bytes += estHeaderRlpSize
   802  
   803  		// Advance to the next header of the query
   804  		switch {
   805  		case query.Origin.Hash != (common.Hash{}) && query.Reverse:
   806  			// Hash based traversal towards the genesis block
   807  			for i := 0; i < int(query.Skip)+1; i++ {
   808  				if header := pm.blockchain.GetHeader(query.Origin.Hash, number); header != nil {
   809  					query.Origin.Hash = header.ParentHash
   810  					number--
   811  				} else {
   812  					unknown = true
   813  					break
   814  				}
   815  			}
   816  		case query.Origin.Hash != (common.Hash{}) && !query.Reverse:
   817  			// Hash based traversal towards the leaf block
   818  			var (
   819  				current = origin.Number.Uint64()
   820  				next    = current + query.Skip + 1
   821  			)
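			// Note (added): a maliciously large query.Skip makes the unsigned
			// addition above wrap around, so next <= current; the check below
			// treats that wraparound as an attack instead of serving headers.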
   822  			if next <= current {
   823  				infos, _ := json.MarshalIndent(p.GetP2PPeer().Info(), "", "  ")
   824  				p.GetP2PPeer().Log().Warn("GetBlockHeaders skip overflow attack", "current", current, "skip", query.Skip, "next", next, "attacker", infos)
   825  				unknown = true
   826  			} else {
   827  				if header := pm.blockchain.GetHeaderByNumber(next); header != nil {
   828  					if pm.blockchain.GetBlockHashesFromHash(header.Hash(), query.Skip+1)[query.Skip] == query.Origin.Hash {
   829  						query.Origin.Hash = header.Hash()
   830  					} else {
   831  						unknown = true
   832  					}
   833  				} else {
   834  					unknown = true
   835  				}
   836  			}
   837  		case query.Reverse:
   838  			// Number based traversal towards the genesis block
   839  			if query.Origin.Number >= query.Skip+1 {
   840  				query.Origin.Number -= query.Skip + 1
   841  			} else {
   842  				unknown = true
   843  			}
   844  
   845  		case !query.Reverse:
   846  			// Number based traversal towards the leaf block
   847  			query.Origin.Number += query.Skip + 1
   848  		}
   849  	}
   850  	return p.SendBlockHeaders(headers)
   851  }
   852  
   853  // handleBlockHeadersMsg handles block header response message.
   854  func handleBlockHeadersMsg(pm *ProtocolManager, p Peer, msg p2p.Msg) error {
   855  	// A batch of headers arrived to one of our previous requests
   856  	var headers []*types.Header
   857  	if err := msg.Decode(&headers); err != nil {
   858  		return errResp(ErrDecode, "msg %v: %v", msg, err)
   859  	}
   860  	if err := pm.downloader.DeliverHeaders(p.GetID(), headers); err != nil {
   861  		logger.Debug("Failed to deliver headers", "err", err)
   862  	}
   863  	return nil
   864  }
   865  
    866  // handleBlockBodiesRequest handles the common part of handleBlockBodiesRequestMsg and
    867  // handleBlockBodiesFetchRequestMsg. It decodes the message to get a list of hashes
    868  // and returns the block bodies corresponding to those hashes.
   869  func handleBlockBodiesRequest(pm *ProtocolManager, p Peer, msg p2p.Msg) ([]rlp.RawValue, error) {
   870  	// Decode the retrieval message
   871  	msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
   872  	if _, err := msgStream.List(); err != nil {
   873  		return nil, err
   874  	}
    875  	// Gather blocks until the fetch or network limits are reached
   876  	var (
   877  		hash   common.Hash
   878  		bytes  int
   879  		bodies []rlp.RawValue
   880  	)
   881  	for bytes < softResponseLimit && len(bodies) < downloader.MaxBlockFetch {
   882  		// Retrieve the hash of the next block
   883  		if err := msgStream.Decode(&hash); err == rlp.EOL {
   884  			break
   885  		} else if err != nil {
   886  			return nil, errResp(ErrDecode, "msg %v: %v", msg, err)
   887  		}
   888  		// Retrieve the requested block body, stopping if enough was found
   889  		if data := pm.blockchain.GetBodyRLP(hash); len(data) != 0 {
   890  			bodies = append(bodies, data)
   891  			bytes += len(data)
   892  		}
   893  	}
   894  
   895  	return bodies, nil
   896  }
   897  
   898  // handleBlockBodiesRequestMsg handles block body request message.
   899  func handleBlockBodiesRequestMsg(pm *ProtocolManager, p Peer, msg p2p.Msg) error {
   900  	if bodies, err := handleBlockBodiesRequest(pm, p, msg); err != nil {
   901  		return err
   902  	} else {
   903  		return p.SendBlockBodiesRLP(bodies)
   904  	}
   905  }
   906  
    907  // handleBlockBodiesMsg handles the block body response message.
   908  func handleBlockBodiesMsg(pm *ProtocolManager, p Peer, msg p2p.Msg) error {
   909  	// A batch of block bodies arrived to one of our previous requests
   910  	var request blockBodiesData
   911  	if err := msg.Decode(&request); err != nil {
   912  		return errResp(ErrDecode, "msg %v: %v", msg, err)
   913  	}
   914  	// Deliver them all to the downloader for queuing
   915  	transactions := make([][]*types.Transaction, len(request))
   916  
   917  	for i, body := range request {
   918  		transactions[i] = body.Transactions
   919  	}
   920  
   921  	err := pm.downloader.DeliverBodies(p.GetID(), transactions)
   922  	if err != nil {
   923  		logger.Debug("Failed to deliver bodies", "err", err)
   924  	}
   925  
   926  	return nil
   927  }
   928  
   929  // handleNodeDataRequestMsg handles node data request message.
   930  func handleNodeDataRequestMsg(pm *ProtocolManager, p Peer, msg p2p.Msg) error {
   931  	// Decode the retrieval message
   932  	msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
   933  	if _, err := msgStream.List(); err != nil {
   934  		return err
   935  	}
    936  	// Gather state data until the fetch or network limits are reached
   937  	var (
   938  		hash  common.Hash
   939  		bytes int
   940  		data  [][]byte
   941  	)
   942  	for bytes < softResponseLimit && len(data) < downloader.MaxStateFetch {
   943  		// Retrieve the hash of the next state entry
   944  		if err := msgStream.Decode(&hash); err == rlp.EOL {
   945  			break
   946  		} else if err != nil {
   947  			return errResp(ErrDecode, "msg %v: %v", msg, err)
   948  		}
   949  		// Retrieve the requested state entry, stopping if enough was found
   950  		// TODO-Klaytn-Snapsync now the code and trienode is mixed in the protocol level, separate these two types.
   951  		entry, err := pm.blockchain.TrieNode(hash)
   952  		if len(entry) == 0 || err != nil {
   953  			// Read the contract code with prefix only to save unnecessary lookups.
   954  			entry, err = pm.blockchain.ContractCodeWithPrefix(hash)
   955  		}
   956  		if err == nil && len(entry) > 0 {
   957  			data = append(data, entry)
   958  			bytes += len(entry)
   959  		}
   960  	}
   961  	return p.SendNodeData(data)
   962  }
   963  
   964  // handleNodeDataMsg handles node data response message.
   965  func handleNodeDataMsg(pm *ProtocolManager, p Peer, msg p2p.Msg) error {
   966  	// A batch of node state data arrived to one of our previous requests
   967  	var data [][]byte
   968  	if err := msg.Decode(&data); err != nil {
   969  		return errResp(ErrDecode, "msg %v: %v", msg, err)
   970  	}
   971  	// Deliver all to the downloader
   972  	if err := pm.downloader.DeliverNodeData(p.GetID(), data); err != nil {
   973  		logger.Debug("Failed to deliver node state data", "err", err)
   974  	}
   975  	return nil
   976  }
   977  
    978  // handleReceiptsRequestMsg handles the receipt request message.
   979  func handleReceiptsRequestMsg(pm *ProtocolManager, p Peer, msg p2p.Msg) error {
   980  	// Decode the retrieval message
   981  	msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
   982  	if _, err := msgStream.List(); err != nil {
   983  		return err
   984  	}
    985  	// Gather state data until the fetch or network limits are reached
   986  	var (
   987  		hash     common.Hash
   988  		bytes    int
   989  		receipts []rlp.RawValue
   990  	)
   991  	for bytes < softResponseLimit && len(receipts) < downloader.MaxReceiptFetch {
   992  		// Retrieve the hash of the next block
   993  		if err := msgStream.Decode(&hash); err == rlp.EOL {
   994  			break
   995  		} else if err != nil {
   996  			return errResp(ErrDecode, "msg %v: %v", msg, err)
   997  		}
   998  		// Retrieve the requested block's receipts, skipping if unknown to us
   999  		results := pm.blockchain.GetReceiptsByBlockHash(hash)
  1000  		if results == nil {
  1001  			if header := pm.blockchain.GetHeaderByHash(hash); header == nil || !header.EmptyReceipts() {
  1002  				continue
  1003  			}
  1004  		}
  1005  		// If known, encode and queue for response packet
  1006  		if encoded, err := rlp.EncodeToBytes(results); err != nil {
  1007  			logger.Error("Failed to encode receipt", "err", err)
  1008  		} else {
  1009  			receipts = append(receipts, encoded)
  1010  			bytes += len(encoded)
  1011  		}
  1012  	}
  1013  	return p.SendReceiptsRLP(receipts)
  1014  }
  1015  
  1016  // handleReceiptsMsg handles receipt response message.
  1017  func handleReceiptsMsg(pm *ProtocolManager, p Peer, msg p2p.Msg) error {
  1018  	// A batch of receipts arrived to one of our previous requests
  1019  	var receipts [][]*types.Receipt
  1020  	if err := msg.Decode(&receipts); err != nil {
  1021  		return errResp(ErrDecode, "msg %v: %v", msg, err)
  1022  	}
  1023  	// Deliver all to the downloader
  1024  	if err := pm.downloader.DeliverReceipts(p.GetID(), receipts); err != nil {
  1025  		logger.Debug("Failed to deliver receipts", "err", err)
  1026  	}
  1027  	return nil
  1028  }
  1029  
  1030  // handleStakingInfoRequestMsg handles staking information request message.
  1031  func handleStakingInfoRequestMsg(pm *ProtocolManager, p Peer, msg p2p.Msg) error {
  1032  	if pm.chainconfig.Istanbul == nil || pm.chainconfig.Istanbul.ProposerPolicy != uint64(istanbul.WeightedRandom) {
  1033  		return errResp(ErrUnsupportedEnginePolicy, "the engine is not istanbul or the policy is not weighted random")
  1034  	}
  1035  
  1036  	// Decode the retrieval message
  1037  	msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
  1038  	if _, err := msgStream.List(); err != nil {
  1039  		return err
  1040  	}
   1041  	// Gather state data until the fetch or network limits are reached
  1042  	var (
  1043  		hash         common.Hash
  1044  		bytes        int
  1045  		stakingInfos []rlp.RawValue
  1046  	)
  1047  	for bytes < softResponseLimit && len(stakingInfos) < downloader.MaxStakingInfoFetch {
  1048  		// Retrieve the hash of the next block
  1049  		if err := msgStream.Decode(&hash); err == rlp.EOL {
  1050  			break
  1051  		} else if err != nil {
  1052  			return errResp(ErrDecode, "msg %v: %v", msg, err)
  1053  		}
  1054  
  1055  		// Retrieve the requested block's staking information, skipping if unknown to us
  1056  		header := pm.blockchain.GetHeaderByHash(hash)
  1057  		if header == nil {
  1058  			logger.Error("Failed to get header", "hash", hash)
  1059  			continue
  1060  		}
  1061  		result := reward.GetStakingInfoOnStakingBlock(header.Number.Uint64())
  1062  		if result == nil {
  1063  			logger.Error("Failed to get staking information on a specific block", "number", header.Number.Uint64(), "hash", hash)
  1064  			continue
  1065  		}
  1066  		// If known, encode and queue for response packet
  1067  		if encoded, err := rlp.EncodeToBytes(result); err != nil {
  1068  			logger.Error("Failed to encode staking info", "err", err)
  1069  		} else {
  1070  			stakingInfos = append(stakingInfos, encoded)
  1071  			bytes += len(encoded)
  1072  		}
  1073  	}
  1074  	return p.SendStakingInfoRLP(stakingInfos)
  1075  }
  1076  
  1077  // handleStakingInfoMsg handles staking information response message.
  1078  func handleStakingInfoMsg(pm *ProtocolManager, p Peer, msg p2p.Msg) error {
  1079  	if pm.chainconfig.Istanbul == nil || pm.chainconfig.Istanbul.ProposerPolicy != uint64(istanbul.WeightedRandom) {
  1080  		return errResp(ErrUnsupportedEnginePolicy, "the engine is not istanbul or the policy is not weighted random")
  1081  	}
  1082  
  1083  	// A batch of stakingInfos arrived to one of our previous requests
  1084  	var stakingInfos []*reward.StakingInfo
  1085  	if err := msg.Decode(&stakingInfos); err != nil {
  1086  		return errResp(ErrDecode, "msg %v: %v", msg, err)
  1087  	}
  1088  	// Deliver all to the downloader
  1089  	if err := pm.downloader.DeliverStakingInfos(p.GetID(), stakingInfos); err != nil {
  1090  		logger.Debug("Failed to deliver staking information", "err", err)
  1091  	}
  1092  	return nil
  1093  }
  1094  
  1095  // handleNewBlockHashesMsg handles new block hashes message.
  1096  func handleNewBlockHashesMsg(pm *ProtocolManager, p Peer, msg p2p.Msg) error {
  1097  	var (
  1098  		announces     newBlockHashesData
  1099  		maxTD         uint64
  1100  		candidateHash *common.Hash
  1101  	)
  1102  	if err := msg.Decode(&announces); err != nil {
  1103  		return errResp(ErrDecode, "%v: %v", msg, err)
  1104  	}
  1105  	// Mark the hashes as present at the remote node
  1106  	// Schedule all the unknown hashes for retrieval
  1107  	for _, block := range announces {
  1108  		p.AddToKnownBlocks(block.Hash)
  1109  
  1110  		if maxTD < block.Number {
  1111  			maxTD = block.Number
  1112  			candidateHash = &block.Hash
  1113  		}
  1114  		if !pm.blockchain.HasBlock(block.Hash, block.Number) {
  1115  			pm.fetcher.Notify(p.GetID(), block.Hash, block.Number, time.Now(), p.FetchBlockHeader, p.FetchBlockBodies)
  1116  		}
  1117  	}
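	// Note (added): Klaytn assigns every block a block score of 1, so the highest
	// announced block number can serve as the peer's total blockscore below.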
  1118  	blockTD := big.NewInt(int64(maxTD))
  1119  	if _, td := p.Head(); blockTD.Cmp(td) > 0 && candidateHash != nil {
  1120  		p.SetHead(*candidateHash, blockTD)
  1121  	}
  1122  	return nil
  1123  }
  1124  
  1125  // handleBlockHeaderFetchRequestMsg handles block header fetch request message.
  1126  // It will send a header that the peer requested.
   1127  // If the peer requests a header which does not exist, an error will be returned.
  1128  func handleBlockHeaderFetchRequestMsg(pm *ProtocolManager, p Peer, msg p2p.Msg) error {
  1129  	var hash common.Hash
  1130  	if err := msg.Decode(&hash); err != nil {
  1131  		return errResp(ErrDecode, "%v: %v", msg, err)
  1132  	}
  1133  
  1134  	header := pm.blockchain.GetHeaderByHash(hash)
  1135  	if header == nil {
  1136  		return fmt.Errorf("peer requested header for non-existing hash. peer: %v, hash: %v", p.GetID(), hash)
  1137  	}
  1138  
  1139  	return p.SendFetchedBlockHeader(header)
  1140  }
  1141  
  1142  // handleBlockHeaderFetchResponseMsg handles new block header response message.
  1143  // This message should contain only one header.
  1144  func handleBlockHeaderFetchResponseMsg(pm *ProtocolManager, p Peer, msg p2p.Msg) error {
  1145  	var header *types.Header
  1146  	if err := msg.Decode(&header); err != nil {
  1147  		return errResp(ErrDecode, "msg %v: %v", msg, err)
  1148  	}
  1149  
  1150  	headers := pm.fetcher.FilterHeaders(p.GetID(), []*types.Header{header}, time.Now())
  1151  	if len(headers) != 0 {
  1152  		logger.Debug("Failed to filter header", "peer", p.GetID(),
  1153  			"num", header.Number.Uint64(), "hash", header.Hash(), "len(headers)", len(headers))
  1154  	}
  1155  
  1156  	return nil
  1157  }
  1158  
  1159  // handleBlockBodiesFetchRequestMsg handles block bodies fetch request message.
   1160  // If the peer requests bodies which do not exist, an error will be returned.
  1161  func handleBlockBodiesFetchRequestMsg(pm *ProtocolManager, p Peer, msg p2p.Msg) error {
  1162  	if bodies, err := handleBlockBodiesRequest(pm, p, msg); err != nil {
  1163  		return err
  1164  	} else {
  1165  		return p.SendFetchedBlockBodiesRLP(bodies)
  1166  	}
  1167  }
  1168  
  1169  // handleBlockBodiesFetchResponseMsg handles block bodies fetch response message.
  1170  func handleBlockBodiesFetchResponseMsg(pm *ProtocolManager, p Peer, msg p2p.Msg) error {
  1171  	// A batch of block bodies arrived to one of our previous requests
  1172  	var request blockBodiesData
  1173  	if err := msg.Decode(&request); err != nil {
  1174  		return errResp(ErrDecode, "msg %v: %v", msg, err)
  1175  	}
  1176  	// Deliver them all to the downloader for queuing
  1177  	transactions := make([][]*types.Transaction, len(request))
  1178  
  1179  	for i, body := range request {
  1180  		transactions[i] = body.Transactions
  1181  	}
  1182  
  1183  	transactions = pm.fetcher.FilterBodies(p.GetID(), transactions, time.Now())
  1184  
  1185  	if len(transactions) > 0 {
  1186  		logger.Warn("Failed to filter bodies", "peer", p.GetID(), "lenTxs", len(transactions))
  1187  	}
  1188  	return nil
  1189  }
  1190  
  1191  // handleNewBlockMsg handles new block message.
  1192  func handleNewBlockMsg(pm *ProtocolManager, p Peer, msg p2p.Msg) error {
  1193  	// Retrieve and decode the propagated block
  1194  	var request newBlockData
  1195  	if err := msg.Decode(&request); err != nil {
  1196  		return errResp(ErrDecode, "%v: %v", msg, err)
  1197  	}
  1198  	request.Block.ReceivedAt = msg.ReceivedAt
  1199  	request.Block.ReceivedFrom = p
  1200  
  1201  	// Mark the peer as owning the block and schedule it for import
  1202  	p.AddToKnownBlocks(request.Block.Hash())
  1203  	pm.fetcher.Enqueue(p.GetID(), request.Block)
  1204  
  1205  	// Assuming the block is importable by the peer, but possibly not yet done so,
  1206  	// calculate the head hash and TD that the peer truly must have.
  1207  	var (
  1208  		trueHead = request.Block.ParentHash()
  1209  		trueTD   = new(big.Int).Sub(request.TD, request.Block.BlockScore())
  1210  	)
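	// Illustrative example (added): if the peer announces TD 1000 for a block
	// with block score 1, the peer's parent head must have a TD of 999.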
   1211  	// Update the peer's total blockscore if better than the previous
  1212  	if _, td := p.Head(); trueTD.Cmp(td) > 0 {
  1213  		p.SetHead(trueHead, trueTD)
  1214  
  1215  		// Schedule a sync if above ours. Note, this will not fire a sync for a gap of
   1216  		// a single block (as the true TD is below the propagated block), however this
  1217  		// scenario should easily be covered by the fetcher.
  1218  		currentBlock := pm.blockchain.CurrentBlock()
  1219  		if trueTD.Cmp(pm.blockchain.GetTd(currentBlock.Hash(), currentBlock.NumberU64())) > 0 {
  1220  			go pm.synchronise(p)
  1221  		}
  1222  	}
  1223  	return nil
  1224  }
  1225  
  1226  // handleTxMsg handles transaction-propagating message.
  1227  func handleTxMsg(pm *ProtocolManager, p Peer, msg p2p.Msg) error {
  1228  	// Transactions arrived, make sure we have a valid and fresh chain to handle them
  1229  	if atomic.LoadUint32(&pm.acceptTxs) == 0 {
  1230  		return nil
  1231  	}
  1232  	// Transactions can be processed, parse all of them and deliver to the pool
  1233  	var txs types.Transactions
  1234  	if err := msg.Decode(&txs); err != nil {
  1235  		return errResp(ErrDecode, "msg %v: %v", msg, err)
  1236  	}
  1237  	// Only valid txs should be pushed into the pool.
  1238  	validTxs := make(types.Transactions, 0, len(txs))
  1239  	var err error
  1240  	for i, tx := range txs {
  1241  		// Validate and mark the remote transaction
  1242  		if tx == nil {
  1243  			err = errResp(ErrDecode, "transaction %d is nil", i)
  1244  			continue
  1245  		}
  1246  		p.AddToKnownTxs(tx.Hash())
  1247  		validTxs = append(validTxs, tx)
  1248  		txReceiveCounter.Inc(1)
  1249  	}
  1250  	pm.txpool.HandleTxMsg(validTxs)
  1251  	return err
  1252  }
  1253  
   1254  // sampleSize calculates the number of peers to send a block to.
   1255  // If there are fewer than minNumPeersToSendBlock peers, it returns the number of peers.
   1256  // Otherwise, it returns the larger of sqrt(len(peers)) and minNumPeersToSendBlock.
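// For example (illustrative): 25 peers yield int(sqrt(25)) = 5, 9 peers yield
// the minimum of 3, and 2 peers yield 2.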
  1257  func sampleSize(peers []Peer) int {
  1258  	if len(peers) < minNumPeersToSendBlock {
  1259  		return len(peers)
  1260  	}
  1261  
  1262  	calcSampleSize := int(math.Sqrt(float64(len(peers))))
  1263  	if calcSampleSize > minNumPeersToSendBlock {
  1264  		return calcSampleSize
  1265  	} else {
  1266  		return minNumPeersToSendBlock
  1267  	}
  1268  }
  1269  
   1270  // BroadcastBlock will propagate a block to a subset of its peers.
   1271  // If the current node is a CN, it sends the block to all PN peers plus sampled CN peers that lack the block.
   1272  // However, if there are more than blockReceivingPNLimit (5) PN peers, it samples only 5 of them.
   1273  // If the current node is not a CN, it sends the block to sampled peers, excluding CNs.
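// For instance (illustrative): a CN with 12 PN peers samples blockReceivingPNLimit = 5
// of them, plus sampled CN peers that do not yet have the block.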
  1274  func (pm *ProtocolManager) BroadcastBlock(block *types.Block) {
  1275  	if parent := pm.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1); parent == nil {
  1276  		logger.Error("Propagating dangling block", "number", block.Number(), "hash", block.Hash())
  1277  		return
  1278  	}
  1279  	// TODO-Klaytn only send all validators + sub(peer) except subset for this block
  1280  	// transfer := peers[:int(math.Sqrt(float64(len(peers))))]
  1281  
  1282  	// Calculate the TD of the block (it's not imported yet, so block.Td is not valid)
  1283  	td := new(big.Int).Add(block.BlockScore(), pm.blockchain.GetTd(block.ParentHash(), block.NumberU64()-1))
  1284  	peersToSendBlock := pm.peers.SamplePeersToSendBlock(block, pm.nodetype)
  1285  	for _, peer := range peersToSendBlock {
  1286  		peer.AsyncSendNewBlock(block, td)
  1287  	}
  1288  }
  1289  
  1290  // BroadcastBlockHash will propagate a blockHash to a subset of its peers.
  1291  func (pm *ProtocolManager) BroadcastBlockHash(block *types.Block) {
  1292  	if !pm.blockchain.HasBlock(block.Hash(), block.NumberU64()) {
  1293  		return
  1294  	}
  1295  
   1296  	// Otherwise, if the block is indeed in our own chain, announce it
  1297  	peersWithoutBlock := pm.peers.PeersWithoutBlock(block.Hash())
  1298  	for _, peer := range peersWithoutBlock {
  1299  		// peer.SendNewBlockHashes([]common.Hash{hash}, []uint64{block.NumberU64()})
  1300  		peer.AsyncSendNewBlockHash(block)
  1301  	}
  1302  	logger.Trace("Announced block", "hash", block.Hash(),
  1303  		"recipients", len(peersWithoutBlock), "duration", common.PrettyDuration(time.Since(block.ReceivedAt)))
  1304  }
  1305  
  1306  // BroadcastTxs propagates a batch of transactions to its peers which are not known to
  1307  // already have the given transaction.
  1308  func (pm *ProtocolManager) BroadcastTxs(txs types.Transactions) {
   1309  	// The transactions are sorted once here, so that they do not have to be
   1310  	// sorted again for each peer when they are passed to sendTransactions().
  1312  	if !sort.IsSorted(types.TxByTime(txs)) {
  1313  		sort.Sort(types.TxByTime(txs))
  1314  	}
  1315  
  1316  	switch pm.nodetype {
  1317  	case common.CONSENSUSNODE:
  1318  		pm.broadcastTxsFromCN(txs)
  1319  	case common.PROXYNODE:
  1320  		pm.broadcastTxsFromPN(txs)
  1321  	case common.ENDPOINTNODE:
  1322  		pm.broadcastTxsFromEN(txs)
  1323  	default:
  1324  		logger.Error("Unexpected nodeType of ProtocolManager", "nodeType", pm.nodetype)
  1325  	}
  1326  }
  1327  
  1328  func (pm *ProtocolManager) broadcastTxsFromCN(txs types.Transactions) {
  1329  	cnPeersWithoutTxs := make(map[Peer]types.Transactions)
  1330  	for _, tx := range txs {
  1331  		peers := pm.peers.CNWithoutTx(tx.Hash())
  1332  		if len(peers) == 0 {
  1333  			logger.Trace("No peer to broadcast transaction", "hash", tx.Hash(), "recipients", len(peers))
  1334  			continue
  1335  		}
  1336  
  1337  		// TODO-Klaytn Code Check
  1338  		// peers = peers[:int(math.Sqrt(float64(len(peers))))]
  1339  		half := (len(peers) / 2) + 2
  1340  		peers = samplingPeers(peers, half)
  1341  		for _, peer := range peers {
  1342  			cnPeersWithoutTxs[peer] = append(cnPeersWithoutTxs[peer], tx)
  1343  		}
  1344  		logger.Trace("Broadcast transaction", "hash", tx.Hash(), "recipients", len(peers))
  1345  	}
  1346  
  1347  	propTxPeersGauge.Update(int64(len(cnPeersWithoutTxs)))
  1348  	// FIXME include this again: peers = peers[:int(math.Sqrt(float64(len(peers))))]
  1349  	for peer, txs2 := range cnPeersWithoutTxs {
  1350  		// peer.SendTransactions(txs)
  1351  		peer.AsyncSendTransactions(txs2)
  1352  	}
  1353  }
  1354  
  1355  func (pm *ProtocolManager) broadcastTxsFromPN(txs types.Transactions) {
  1356  	cnPeersWithoutTxs := make(map[Peer]types.Transactions)
  1357  	peersWithoutTxs := make(map[Peer]types.Transactions)
  1358  	for _, tx := range txs {
  1359  		// TODO-Klaytn drop or missing tx
  1360  		cnPeers := pm.peers.CNWithoutTx(tx.Hash())
  1361  		if len(cnPeers) > 0 {
  1362  			cnPeers = samplingPeers(cnPeers, 2) // TODO-Klaytn optimize pickSize or propagation way
  1363  			for _, peer := range cnPeers {
  1364  				cnPeersWithoutTxs[peer] = append(cnPeersWithoutTxs[peer], tx)
  1365  			}
  1366  			logger.Trace("Broadcast transaction", "hash", tx.Hash(), "recipients", len(cnPeers))
  1367  		}
  1368  		pm.peers.UpdateTypePeersWithoutTxs(tx, common.PROXYNODE, peersWithoutTxs)
  1369  		txSendCounter.Inc(1)
  1370  	}
  1371  
  1372  	propTxPeersGauge.Update(int64(len(peersWithoutTxs) + len(cnPeersWithoutTxs)))
  1373  	sendTransactions(cnPeersWithoutTxs)
  1374  	sendTransactions(peersWithoutTxs)
  1375  }
  1376  
  1377  func (pm *ProtocolManager) broadcastTxsFromEN(txs types.Transactions) {
  1378  	peersWithoutTxs := make(map[Peer]types.Transactions)
  1379  	for _, tx := range txs {
  1380  		pm.peers.UpdateTypePeersWithoutTxs(tx, common.CONSENSUSNODE, peersWithoutTxs)
  1381  		pm.peers.UpdateTypePeersWithoutTxs(tx, common.PROXYNODE, peersWithoutTxs)
  1382  		pm.peers.UpdateTypePeersWithoutTxs(tx, common.ENDPOINTNODE, peersWithoutTxs)
  1383  		txSendCounter.Inc(1)
  1384  	}
  1385  
  1386  	propTxPeersGauge.Update(int64(len(peersWithoutTxs)))
  1387  	sendTransactions(peersWithoutTxs)
  1388  }
  1389  
  1390  // ReBroadcastTxs resends transactions regardless of whether the peers already have them.
  1391  // Only PNs and ENs rebroadcast transactions to their peers; a CN does not rebroadcast.
  1392  func (pm *ProtocolManager) ReBroadcastTxs(txs types.Transactions) {
  1393  	// A consensus node does not rebroadcast transactions, hence return here.
  1394  	if pm.nodetype == common.CONSENSUSNODE {
  1395  		return
  1396  	}
  1397  
  1398  	// The transactions are broadcast per peer via sendTransactions(), which sends
  1399  	// them in sorted order. Sorting the whole batch by time once here prevents the
  1400  	// transactions from being sorted again for each individual peer.
  1401  	if !sort.IsSorted(types.TxByTime(txs)) {
  1402  		sort.Sort(types.TxByTime(txs))
  1403  	}
  1404  
  1405  	peersWithoutTxs := make(map[Peer]types.Transactions)
  1406  	for _, tx := range txs {
  1407  		peers := pm.peers.SampleResendPeersByType(pm.nodetype)
  1408  		for _, peer := range peers {
  1409  			peersWithoutTxs[peer] = append(peersWithoutTxs[peer], tx)
  1410  		}
  1411  		txResendCounter.Inc(1)
  1412  	}
  1413  
  1414  	propTxPeersGauge.Update(int64(len(peersWithoutTxs)))
  1415  	sendTransactions(peersWithoutTxs)
  1416  }
  1417  
  1418  // sendTransactions iterates the given map of Peer to Transactions and sends each
  1419  // batch of transactions to its paired peer synchronously.
  1420  func sendTransactions(txsSet map[Peer]types.Transactions) {
  1421  	for peer, txs := range txsSet {
  1422  		if err := peer.SendTransactions(txs); err != nil {
  1423  			logger.Error("Failed to send txs", "peer", peer.GetAddr(), "peerType", peer.ConnType(), "numTxs", len(txs), "err", err)
  1424  		}
  1425  	}
  1426  }
  1427  
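        // samplingPeers shuffles the given peers in place and returns a random subset of
        // at most pickSize peers, e.g. samplingPeers(cnPeers, 2) picks two random CN peers.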
  1428  func samplingPeers(peers []Peer, pickSize int) []Peer {
  1429  	if len(peers) <= pickSize {
  1430  		return peers
  1431  	}
  1432  
  1433  	// Seed with nanosecond precision and use an unbiased Fisher-Yates shuffle, so
  1434  	// that calls within the same second do not pick the same subset of peers.
  1435  	picker := rand.New(rand.NewSource(time.Now().UnixNano()))
  1436  	picker.Shuffle(len(peers), func(i, j int) {
  1437  		peers[i], peers[j] = peers[j], peers[i]
  1438  	})
  1439  
  1440  	return peers[:pickSize]
  1441  }
  1442  
  1443  // minedBroadcastLoop broadcasts newly mined blocks to the peers.
  1444  func (pm *ProtocolManager) minedBroadcastLoop() {
  1445  	// The loop stops automatically when the subscription is unsubscribed.
  1446  	for obj := range pm.minedBlockSub.Chan() {
  1447  		switch ev := obj.Data.(type) {
  1448  		case blockchain.NewMinedBlockEvent:
  1449  			pm.BroadcastBlock(ev.Block)     // First propagate block to peers
  1450  			pm.BroadcastBlockHash(ev.Block) // Only then announce to the rest
  1451  		}
  1452  	}
  1453  }
  1454  
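        // txBroadcastLoop broadcasts newly pooled transactions to the peers until the
        // transaction subscription is closed.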
  1455  func (pm *ProtocolManager) txBroadcastLoop() {
  1456  	for {
  1457  		select {
  1458  		case event := <-pm.txsCh:
  1459  			pm.BroadcastTxs(event.Txs)
  1460  			// Err() channel will be closed when unsubscribing.
  1461  		case <-pm.txsSub.Err():
  1462  			return
  1463  		}
  1464  	}
  1465  }
  1466  
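        // txResendLoop resends up to maxTxCount cached pending transactions every
        // period seconds until quitResendCh is closed.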
  1467  func (pm *ProtocolManager) txResendLoop(period uint64, maxTxCount int) {
  1468  	tick := time.Duration(period) * time.Second
  1469  	resend := time.NewTicker(tick)
  1470  	defer resend.Stop()
  1471  
  1472  	logger.Debug("txResendLoop started", "period", tick.Seconds())
  1473  
  1474  	for {
  1475  		select {
  1476  		case <-resend.C:
  1477  			pending := pm.txpool.CachedPendingTxsByCount(maxTxCount)
  1478  			pm.txResend(pending)
  1479  		case <-pm.quitResendCh:
  1480  			logger.Debug("txResendLoop stopped")
  1481  			return
  1482  		}
  1483  	}
  1484  }
  1485  
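        // txResend rebroadcasts the given pending transactions while tracking the number
        // of concurrently running resend routines in txResendRoutineGauge.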
  1486  func (pm *ProtocolManager) txResend(pending types.Transactions) {
  1487  	txResendRoutineGauge.Update(txResendRoutineGauge.Value() + 1)
  1488  	defer txResendRoutineGauge.Update(txResendRoutineGauge.Value() - 1)
  1489  	// TODO-Klaytn drop or missing tx
  1490  	if len(pending) > 0 {
  1491  		logger.Debug("Tx Resend", "count", len(pending))
  1492  		pm.ReBroadcastTxs(pending)
  1493  	}
  1494  }
  1495  
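        // useTxResend returns true if the transaction resend mechanism should be used,
        // i.e. the node is not a CN and the legacy resend path is disabled.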
  1496  func (pm *ProtocolManager) useTxResend() bool {
  1497  	return pm.nodetype != common.CONSENSUSNODE && !pm.txResendUseLegacy
  1498  }
  1502  
  1503  // NodeInfo represents a short summary of the Klaytn sub-protocol metadata
  1504  // known about the host peer.
  1505  type NodeInfo struct {
  1506  	// TODO-Klaytn describe predefined network ID below
  1507  	Network    uint64              `json:"network"`    // Klaytn network ID
  1508  	BlockScore *big.Int            `json:"blockscore"` // Total blockscore of the host's blockchain
  1509  	Genesis    common.Hash         `json:"genesis"`    // SHA3 hash of the host's genesis block
  1510  	Config     *params.ChainConfig `json:"config"`     // Chain configuration for the fork rules
  1511  	Head       common.Hash         `json:"head"`       // SHA3 hash of the host's best owned block
  1512  }
  1513  
  1514  // NodeInfo retrieves some protocol metadata about the running host node.
  1515  func (pm *ProtocolManager) NodeInfo() *NodeInfo {
  1516  	currentBlock := pm.blockchain.CurrentBlock()
  1517  	return &NodeInfo{
  1518  		Network:    pm.networkId,
  1519  		BlockScore: pm.blockchain.GetTd(currentBlock.Hash(), currentBlock.NumberU64()),
  1520  		Genesis:    pm.blockchain.Genesis().Hash(),
  1521  		Config:     pm.blockchain.Config(),
  1522  		Head:       currentBlock.Hash(),
  1523  	}
  1524  }
  1525  
  1526  // The functions below are used in the Istanbul BFT consensus.
  1527  // Enqueue wraps the fetcher's Enqueue function to insert the given block.
  1528  func (pm *ProtocolManager) Enqueue(id string, block *types.Block) {
  1529  	pm.fetcher.Enqueue(id, block)
  1530  }
  1531  
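        // FindPeers returns the consensus peers whose addresses are in the given target
        // set, resolving a peer's address from its public key when it is not yet cached.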
  1532  func (pm *ProtocolManager) FindPeers(targets map[common.Address]bool) map[common.Address]consensus.Peer {
  1533  	m := make(map[common.Address]consensus.Peer)
  1534  	for _, p := range pm.peers.Peers() {
  1535  		addr := p.GetAddr()
  1536  		if addr == (common.Address{}) {
  1537  			pubKey, err := p.GetP2PPeerID().Pubkey()
  1538  			if err != nil {
  1539  				continue
  1540  			}
  1541  			addr = crypto.PubkeyToAddress(*pubKey)
  1542  			p.SetAddr(addr)
  1543  		}
  1544  		if targets[addr] {
  1545  			m[addr] = p
  1546  		}
  1547  	}
  1548  	return m
  1549  }
  1550  
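        // GetCNPeers returns all connected CN peers keyed by their addresses.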
  1551  func (pm *ProtocolManager) GetCNPeers() map[common.Address]consensus.Peer {
  1552  	m := make(map[common.Address]consensus.Peer)
  1553  	for addr, p := range pm.peers.CNPeers() {
  1554  		m[addr] = p
  1555  	}
  1556  	return m
  1557  }
  1558  
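        // FindCNPeers returns the CN peers whose addresses are in the given target set.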
  1559  func (pm *ProtocolManager) FindCNPeers(targets map[common.Address]bool) map[common.Address]consensus.Peer {
  1560  	m := make(map[common.Address]consensus.Peer)
  1561  	for addr, p := range pm.peers.CNPeers() {
  1562  		if targets[addr] {
  1563  			m[addr] = p
  1564  		}
  1565  	}
  1566  	return m
  1567  }
  1568  
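        // GetENPeers returns all connected EN peers keyed by their addresses.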
  1569  func (pm *ProtocolManager) GetENPeers() map[common.Address]consensus.Peer {
  1570  	m := make(map[common.Address]consensus.Peer)
  1571  	for addr, p := range pm.peers.ENPeers() {
  1572  		m[addr] = p
  1573  	}
  1574  	return m
  1575  }
  1576  
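        // GetPeers returns the addresses of all connected peers, resolving a peer's
        // address from its public key when it is not yet cached.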
  1577  func (pm *ProtocolManager) GetPeers() []common.Address {
  1578  	addrs := make([]common.Address, 0)
  1579  	for _, p := range pm.peers.Peers() {
  1580  		addr := p.GetAddr()
  1581  		if addr == (common.Address{}) {
  1582  			pubKey, err := p.GetP2PPeerID().Pubkey()
  1583  			if err != nil {
  1584  				continue
  1585  			}
  1586  			addr = crypto.PubkeyToAddress(*pubKey)
  1587  			p.SetAddr(addr)
  1588  		}
  1589  		addrs = append(addrs, addr)
  1590  	}
  1591  	return addrs
  1592  }
  1593  
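        // Downloader returns the downloader of the protocol manager.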
  1594  func (pm *ProtocolManager) Downloader() ProtocolManagerDownloader {
  1595  	return pm.downloader
  1596  }
  1597  
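        // SetWsEndPoint sets the websocket endpoint of the protocol manager.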
  1598  func (pm *ProtocolManager) SetWsEndPoint(wsep string) {
  1599  	pm.wsendpoint = wsep
  1600  }
  1601  
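        // GetSubProtocols returns the sub-protocols supported by the protocol manager.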
  1602  func (pm *ProtocolManager) GetSubProtocols() []p2p.Protocol {
  1603  	return pm.SubProtocols
  1604  }
  1605  
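        // ProtocolVersion returns the version of the first supported sub-protocol.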
  1606  func (pm *ProtocolManager) ProtocolVersion() int {
  1607  	return int(pm.SubProtocols[0].Version)
  1608  }
  1609  
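        // SetAcceptTxs atomically raises the acceptTxs flag, marking the node ready to
        // accept incoming transactions.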
  1610  func (pm *ProtocolManager) SetAcceptTxs() {
  1611  	atomic.StoreUint32(&pm.acceptTxs, 1)
  1612  }
  1613  
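        // NodeType returns the node type of the protocol manager, e.g. CN, PN, or EN.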
  1614  func (pm *ProtocolManager) NodeType() common.ConnType {
  1615  	return pm.nodetype
  1616  }