github.com/klaytn/klaytn@v1.12.1/node/cn/peer.go (about)

     1  // Modifications Copyright 2018 The klaytn Authors
     2  // Copyright 2015 The go-ethereum Authors
     3  // This file is part of go-ethereum.
     4  //
     5  // The go-ethereum library is free software: you can redistribute it and/or modify
     6  // it under the terms of the GNU Lesser General Public License as published by
     7  // the Free Software Foundation, either version 3 of the License, or
     8  // (at your option) any later version.
     9  //
    10  // The go-ethereum library is distributed in the hope that it will be useful,
    11  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    12  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    13  // GNU Lesser General Public License for more details.
    14  //
    15  // You should have received a copy of the GNU Lesser General Public License
    16  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    17  //
    18  // This file is derived from eth/peer.go (2018/06/04).
    19  // Modified and improved for the klaytn development.
    20  
    21  package cn
    22  
    23  import (
    24  	"errors"
    25  	"fmt"
    26  	"math/big"
    27  	"sort"
    28  	"sync"
    29  	"sync/atomic"
    30  	"time"
    31  
    32  	"github.com/klaytn/klaytn/blockchain/types"
    33  	"github.com/klaytn/klaytn/common"
    34  	"github.com/klaytn/klaytn/consensus"
    35  	"github.com/klaytn/klaytn/crypto"
    36  	"github.com/klaytn/klaytn/datasync/downloader"
    37  	"github.com/klaytn/klaytn/networks/p2p"
    38  	"github.com/klaytn/klaytn/networks/p2p/discover"
    39  	"github.com/klaytn/klaytn/node/cn/snap"
    40  	"github.com/klaytn/klaytn/rlp"
    41  )
    42  
// Sentinel errors returned by the peer set and peer constructors.
var (
	errClosed                 = errors.New("peer set is closed")                 // peer set was closed before/while registering
	errAlreadyRegistered      = errors.New("peer is already registered")        // duplicate registration attempt
	errNotRegistered          = errors.New("peer is not registered")            // unregister of an unknown peer
	errUnexpectedNodeType     = errors.New("unexpected node type of peer")      // peer's node type does not match expectation
	errNotSupportedByBasePeer = errors.New("not supported by basePeer")         // operation only valid on a specialized peer
)
    50  
const (
	maxKnownTxs    = 32768 // Maximum transactions hashes to keep in the known list (prevent DOS)
	maxKnownBlocks = 1024  // Maximum block hashes to keep in the known list (prevent DOS)

	// maxQueuedTxs is the maximum number of transaction lists to queue up before
	// dropping broadcasts. This is a sensitive number as a transaction list might
	// contain a single transaction, or thousands.
	maxQueuedTxs = 128

	// maxQueuedProps is the maximum number of block propagations to queue up before
	// dropping broadcasts. There's not much point in queueing stale blocks, so a few
	// that might cover uncles should be enough.
	// TODO-Klaytn-Refactoring Look into the usage of maxQueuedProps and remove it if needed
	maxQueuedProps = 4

	// maxQueuedAnns is the maximum number of block announcements to queue up before
	// dropping broadcasts. Similarly to block propagations, there's no point to queue
	// above some healthy uncle limit, so use that.
	// TODO-Klaytn-Refactoring Look into the usage of maxQueuedAnns and remove it if needed
	maxQueuedAnns = 4

	// handshakeTimeout bounds the status-message exchange in Handshake.
	handshakeTimeout = 5 * time.Second
)
    74  
// PeerInfo represents a short summary of the Klaytn sub-protocol metadata known
// about a connected peer. It is what Info() reports to admin/RPC consumers.
type PeerInfo struct {
	Version    int      `json:"version"`    // Klaytn protocol version negotiated
	BlockScore *big.Int `json:"blockscore"` // Total blockscore of the peer's blockchain
	Head       string   `json:"head"`       // SHA3 hash of the peer's best owned block
}
    82  
// propEvent is a block propagation, waiting for its turn in the broadcast queue.
type propEvent struct {
	block *types.Block // block to propagate
	td    *big.Int     // total blockscore accompanying the block
}
    88  
//go:generate mockgen -destination=node/cn/peer_mock_test.go -package=cn github.com/klaytn/klaytn/node/cn Peer

// Peer is the interface implemented by every connected remote Klaytn peer,
// regardless of how many underlying p2p channels it uses.
type Peer interface {
	// Broadcast is a write loop that multiplexes block propagations, announcements
	// and transaction broadcasts into the remote peer. The goal is to have an async
	// writer that does not lock up node internals.
	Broadcast()

	// Close signals the broadcast goroutine to terminate.
	Close()

	// Info gathers and returns a collection of metadata known about a peer.
	Info() *PeerInfo

	// SetHead updates the head hash and total blockscore of the peer.
	SetHead(hash common.Hash, td *big.Int)

	// AddToKnownBlocks adds a block hash to knownBlocksCache for the peer, ensuring that the block will
	// never be propagated to this particular peer.
	AddToKnownBlocks(hash common.Hash)

	// AddToKnownTxs adds a transaction hash to knownTxsCache for the peer, ensuring that it
	// will never be propagated to this particular peer.
	AddToKnownTxs(hash common.Hash)

	// Send writes an RLP-encoded message with the given code.
	// data should have been encoded as an RLP list.
	Send(msgcode uint64, data interface{}) error

	// SendTransactions sends transactions to the peer and includes the hashes
	// in its transaction hash set for future reference.
	SendTransactions(txs types.Transactions) error

	// ReSendTransactions sends txs to a peer in order to prevent the txs from missing.
	ReSendTransactions(txs types.Transactions) error

	// AsyncSendTransactions sends transactions asynchronously to the peer.
	AsyncSendTransactions(txs types.Transactions)

	// SendNewBlockHashes announces the availability of a number of blocks through
	// a hash notification.
	SendNewBlockHashes(hashes []common.Hash, numbers []uint64) error

	// AsyncSendNewBlockHash queues the availability of a block for propagation to a
	// remote peer. If the peer's broadcast queue is full, the event is silently
	// dropped.
	AsyncSendNewBlockHash(block *types.Block)

	// SendNewBlock propagates an entire block to a remote peer.
	SendNewBlock(block *types.Block, td *big.Int) error

	// AsyncSendNewBlock queues an entire block for propagation to a remote peer. If
	// the peer's broadcast queue is full, the event is silently dropped.
	AsyncSendNewBlock(block *types.Block, td *big.Int)

	// SendBlockHeaders sends a batch of block headers to the remote peer.
	SendBlockHeaders(headers []*types.Header) error

	// SendFetchedBlockHeader sends a block header to the remote peer, requested by fetcher.
	SendFetchedBlockHeader(header *types.Header) error

	// SendBlockBodies sends a batch of block contents to the remote peer.
	SendBlockBodies(bodies []*blockBody) error

	// SendBlockBodiesRLP sends a batch of block contents to the remote peer from
	// an already RLP encoded format.
	SendBlockBodiesRLP(bodies []rlp.RawValue) error

	// SendFetchedBlockBodiesRLP sends a batch of block contents to the remote peer from
	// an already RLP encoded format, requested by fetcher.
	SendFetchedBlockBodiesRLP(bodies []rlp.RawValue) error

	// SendNodeData sends a batch of arbitrary internal data, corresponding to the
	// hashes requested.
	SendNodeData(data [][]byte) error

	// SendReceiptsRLP sends a batch of transaction receipts, corresponding to the
	// ones requested from an already RLP encoded format.
	SendReceiptsRLP(receipts []rlp.RawValue) error

	// SendStakingInfoRLP sends a batch of staking information, corresponding to the
	// ones requested from an already RLP encoded format.
	SendStakingInfoRLP(stakingInfos []rlp.RawValue) error

	// FetchBlockHeader is a wrapper around the header query functions to fetch a
	// single header. It is used solely by the fetcher.
	FetchBlockHeader(hash common.Hash) error

	// FetchBlockBodies fetches a batch of blocks' bodies corresponding to the hashes
	// specified. It uses a different message type from RequestBodies.
	// It is used solely by the fetcher.
	FetchBlockBodies(hashes []common.Hash) error

	// Handshake executes the Klaytn protocol handshake, negotiating version number,
	// network IDs, difficulties, head, and genesis blocks and returning error.
	Handshake(network uint64, chainID, td *big.Int, head common.Hash, genesis common.Hash) error

	// ConnType returns the conntype of the peer.
	ConnType() common.ConnType

	// GetID returns the id of the peer.
	GetID() string

	// GetP2PPeerID returns the id of the p2p.Peer.
	GetP2PPeerID() discover.NodeID

	// GetChainID returns the chain id of the peer.
	GetChainID() *big.Int

	// GetAddr returns the address of the peer.
	GetAddr() common.Address

	// SetAddr sets the address of the peer.
	SetAddr(addr common.Address)

	// GetVersion returns the version of the peer.
	GetVersion() int

	// KnowsBlock returns if the peer is known to have the block, based on knownBlocksCache.
	KnowsBlock(hash common.Hash) bool

	// KnowsTx returns if the peer is known to have the transaction, based on knownTxsCache.
	KnowsTx(hash common.Hash) bool

	// GetP2PPeer returns the p2p.
	GetP2PPeer() *p2p.Peer

	// DisconnectP2PPeer disconnects the p2p peer with the given reason.
	DisconnectP2PPeer(discReason p2p.DiscReason)

	// GetRW returns the MsgReadWriter of the peer.
	GetRW() p2p.MsgReadWriter

	// Handle is the callback invoked to manage the life cycle of a Klaytn Peer. When
	// this function terminates, the Peer is disconnected.
	Handle(pm *ProtocolManager) error

	// UpdateRWImplementationVersion updates the version of the implementation of RW.
	UpdateRWImplementationVersion()

	// Peer encapsulates the methods required to synchronise with a remote full peer.
	downloader.Peer

	// RegisterConsensusMsgCode registers the channel of consensus msg.
	RegisterConsensusMsgCode(msgCode uint64) error

	// RunningCap returns true if the peer is actively connected using any of the
	// enumerated versions of a specific protocol, meaning that at least one of the
	// versions is supported by both this node and the peer p.
	RunningCap(protocol string, versions []uint) bool

	// AddSnapExtension extends the peer to support snap protocol.
	AddSnapExtension(peer *snap.Peer)

	// ExistSnapExtension returns true if the peer supports snap protocol.
	ExistSnapExtension() bool
}
   245  
// basePeer is a common data structure used by implementation of Peer.
type basePeer struct {
	id string // short hex form of the p2p node ID (first 8 bytes)

	addr common.Address

	*p2p.Peer // embedded devp2p peer (identity, logging, disconnect)
	rw        p2p.MsgReadWriter

	version  int         // Protocol version negotiated
	forkDrop *time.Timer // Timed connection dropper if forks aren't validated in time

	head common.Hash // guarded by lock
	td   *big.Int    // guarded by lock
	lock sync.RWMutex

	knownTxsCache    common.Cache              // FIFO cache of transaction hashes known to be known by this peer
	knownBlocksCache common.Cache              // FIFO cache of block hashes known to be known by this peer
	queuedTxs        chan []*types.Transaction // Queue of transactions to broadcast to the peer
	queuedProps      chan *propEvent           // Queue of blocks to broadcast to the peer
	queuedAnns       chan *types.Block         // Queue of blocks to announce to the peer
	term             chan struct{}             // Termination channel to stop the broadcaster

	chainID *big.Int // ChainID to sign a transaction

	snapExt *snap.Peer // Satellite `snap` connection
}
   273  
   274  // newKnownBlockCache returns an empty cache for knownBlocksCache.
   275  func newKnownBlockCache() common.Cache {
   276  	return common.NewCache(common.FIFOCacheConfig{CacheSize: maxKnownBlocks, IsScaled: true})
   277  }
   278  
   279  // newKnownTxCache returns an empty cache for knownTxsCache.
   280  func newKnownTxCache() common.Cache {
   281  	return common.NewCache(common.FIFOCacheConfig{CacheSize: maxKnownTxs, IsScaled: true})
   282  }
   283  
   284  // newPeer returns new Peer interface.
   285  func newPeer(version int, p *p2p.Peer, rw p2p.MsgReadWriter) Peer {
   286  	id := p.ID()
   287  
   288  	return &singleChannelPeer{
   289  		basePeer: &basePeer{
   290  			Peer:             p,
   291  			rw:               rw,
   292  			version:          version,
   293  			id:               fmt.Sprintf("%x", id[:8]),
   294  			knownTxsCache:    newKnownTxCache(),
   295  			knownBlocksCache: newKnownBlockCache(),
   296  			queuedTxs:        make(chan []*types.Transaction, maxQueuedTxs),
   297  			queuedProps:      make(chan *propEvent, maxQueuedProps),
   298  			queuedAnns:       make(chan *types.Block, maxQueuedAnns),
   299  			term:             make(chan struct{}),
   300  		},
   301  	}
   302  }
   303  
// ChannelOfMessage is a map with the index of the channel per message.
// Only TxMsg is routed to the dedicated tx channel; everything else uses
// the default channel.
var ChannelOfMessage = map[uint64]int{
	StatusMsg:                   p2p.ConnDefault, // StatusMsg's Channel should be set to ConnDefault
	NewBlockHashesMsg:           p2p.ConnDefault,
	BlockHeaderFetchRequestMsg:  p2p.ConnDefault,
	BlockHeaderFetchResponseMsg: p2p.ConnDefault,
	BlockBodiesFetchRequestMsg:  p2p.ConnDefault,
	BlockBodiesFetchResponseMsg: p2p.ConnDefault,
	TxMsg:                       p2p.ConnTxMsg,
	BlockHeadersRequestMsg:      p2p.ConnDefault,
	BlockHeadersMsg:             p2p.ConnDefault,
	BlockBodiesRequestMsg:       p2p.ConnDefault,
	BlockBodiesMsg:              p2p.ConnDefault,
	NewBlockMsg:                 p2p.ConnDefault,

	// Protocol messages belonging to klay/63
	NodeDataRequestMsg: p2p.ConnDefault,
	NodeDataMsg:        p2p.ConnDefault,
	ReceiptsRequestMsg: p2p.ConnDefault,
	ReceiptsMsg:        p2p.ConnDefault,

	// Protocol messages belonging to klay/65
	StakingInfoRequestMsg: p2p.ConnDefault,
	StakingInfoMsg:        p2p.ConnDefault,
}
   329  
// ConcurrentOfChannel is the number of concurrent message handlers used for
// each channel index (more workers for the tx channel).
var ConcurrentOfChannel = []int{
	p2p.ConnDefault: 1,
	p2p.ConnTxMsg:   3,
}
   334  
   335  // newPeerWithRWs creates a new Peer object with a slice of p2p.MsgReadWriter.
   336  func newPeerWithRWs(version int, p *p2p.Peer, rws []p2p.MsgReadWriter) (Peer, error) {
   337  	id := p.ID()
   338  
   339  	lenRWs := len(rws)
   340  	if lenRWs == 1 {
   341  		return newPeer(version, p, rws[p2p.ConnDefault]), nil
   342  	} else if lenRWs > 1 {
   343  		bPeer := &basePeer{
   344  			Peer:             p,
   345  			rw:               rws[p2p.ConnDefault],
   346  			version:          version,
   347  			id:               fmt.Sprintf("%x", id[:8]),
   348  			knownTxsCache:    newKnownTxCache(),
   349  			knownBlocksCache: newKnownBlockCache(),
   350  			queuedTxs:        make(chan []*types.Transaction, maxQueuedTxs),
   351  			queuedProps:      make(chan *propEvent, maxQueuedProps),
   352  			queuedAnns:       make(chan *types.Block, maxQueuedAnns),
   353  			term:             make(chan struct{}),
   354  		}
   355  		return &multiChannelPeer{
   356  			basePeer: bPeer,
   357  			rws:      rws,
   358  			chMgr:    NewChannelManager(len(rws)),
   359  		}, nil
   360  	} else {
   361  		return nil, errors.New("len(rws) should be greater than zero.")
   362  	}
   363  }
   364  
// Broadcast is a write loop that multiplexes block propagations, announcements
// and transaction broadcasts into the remote peer. The goal is to have an async
// writer that does not lock up node internals. The loop runs until Close()
// closes p.term.
func (p *basePeer) Broadcast() {
	for {
		select {
		case txs := <-p.queuedTxs:
			// Send failures are logged but do not stop the loop; later
			// queue items are still attempted.
			if err := p.SendTransactions(txs); err != nil {
				logger.Error("fail to SendTransactions", "peer", p.id, "err", err)
				continue
				// return
			}
			p.Log().Trace("Broadcast transactions", "peer", p.id, "count", len(txs))

		case prop := <-p.queuedProps:
			if err := p.SendNewBlock(prop.block, prop.td); err != nil {
				logger.Error("fail to SendNewBlock", "peer", p.id, "err", err)
				continue
				// return
			}
			p.Log().Trace("Propagated block", "peer", p.id, "number", prop.block.Number(), "hash", prop.block.Hash(), "td", prop.td)

		case block := <-p.queuedAnns:
			if err := p.SendNewBlockHashes([]common.Hash{block.Hash()}, []uint64{block.NumberU64()}); err != nil {
				logger.Error("fail to SendNewBlockHashes", "peer", p.id, "err", err)
				continue
				// return
			}
			p.Log().Trace("Announced block", "peer", p.id, "number", block.Number(), "hash", block.Hash())

		case <-p.term:
			// Close() was called; drain nothing further and exit.
			p.Log().Debug("Peer broadcast loop end", "peer", p.id)
			return
		}
	}
}
   401  
   402  // Close signals the broadcast goroutine to terminate.
   403  func (p *basePeer) Close() {
   404  	close(p.term)
   405  }
   406  
   407  // Info gathers and returns a collection of metadata known about a peer.
   408  func (p *basePeer) Info() *PeerInfo {
   409  	hash, td := p.Head()
   410  
   411  	return &PeerInfo{
   412  		Version:    p.version,
   413  		BlockScore: td,
   414  		Head:       hash.Hex(),
   415  	}
   416  }
   417  
   418  // Head retrieves a copy of the current head hash and total blockscore of the
   419  // peer.
   420  func (p *basePeer) Head() (hash common.Hash, td *big.Int) {
   421  	p.lock.RLock()
   422  	defer p.lock.RUnlock()
   423  
   424  	copy(hash[:], p.head[:])
   425  	return hash, new(big.Int).Set(p.td)
   426  }
   427  
   428  // SetHead updates the head hash and total blockscore of the peer.
   429  func (p *basePeer) SetHead(hash common.Hash, td *big.Int) {
   430  	p.lock.Lock()
   431  	defer p.lock.Unlock()
   432  
   433  	copy(p.head[:], hash[:])
   434  	p.td.Set(td)
   435  }
   436  
   437  // AddToKnownBlocks adds a block hash to knownBlocksCache for the peer, ensuring that the block will
   438  // never be propagated to this particular peer.
   439  func (p *basePeer) AddToKnownBlocks(hash common.Hash) {
   440  	p.knownBlocksCache.Add(hash, struct{}{})
   441  }
   442  
   443  // AddToKnownTxs adds a transaction hash to knownTxsCache for the peer, ensuring that it
   444  // will never be propagated to this particular peer.
   445  func (p *basePeer) AddToKnownTxs(hash common.Hash) {
   446  	p.knownTxsCache.Add(hash, struct{}{})
   447  }
   448  
   449  // Send writes an RLP-encoded message with the given code.
   450  // data should have been encoded as an RLP list.
   451  func (p *basePeer) Send(msgcode uint64, data interface{}) error {
   452  	return p2p.Send(p.rw, msgcode, data)
   453  }
   454  
   455  // SendTransactions sends transactions to the peer and includes the hashes
   456  // in its transaction hash set for future reference.
   457  func (p *basePeer) SendTransactions(txs types.Transactions) error {
   458  	// Before sending transactions, sort transactions in ascending order by time.
   459  	if !sort.IsSorted(types.TxByTime(txs)) {
   460  		sort.Sort(types.TxByTime(txs))
   461  	}
   462  
   463  	for _, tx := range txs {
   464  		p.AddToKnownTxs(tx.Hash())
   465  	}
   466  	return p2p.Send(p.rw, TxMsg, txs)
   467  }
   468  
   469  // ReSendTransactions sends txs to a peer in order to prevent the txs from missing.
   470  func (p *basePeer) ReSendTransactions(txs types.Transactions) error {
   471  	// Before sending transactions, sort transactions in ascending order by time.
   472  	if !sort.IsSorted(types.TxByTime(txs)) {
   473  		sort.Sort(types.TxByTime(txs))
   474  	}
   475  
   476  	return p2p.Send(p.rw, TxMsg, txs)
   477  }
   478  
   479  func (p *basePeer) AsyncSendTransactions(txs types.Transactions) {
   480  	select {
   481  	case p.queuedTxs <- txs:
   482  		for _, tx := range txs {
   483  			p.AddToKnownTxs(tx.Hash())
   484  		}
   485  	default:
   486  		p.Log().Trace("Dropping transaction propagation", "count", len(txs))
   487  	}
   488  }
   489  
   490  // SendNewBlockHashes announces the availability of a number of blocks through
   491  // a hash notification.
   492  func (p *basePeer) SendNewBlockHashes(hashes []common.Hash, numbers []uint64) error {
   493  	for _, hash := range hashes {
   494  		p.AddToKnownBlocks(hash)
   495  	}
   496  	request := make(newBlockHashesData, len(hashes))
   497  	for i := 0; i < len(hashes); i++ {
   498  		request[i].Hash = hashes[i]
   499  		request[i].Number = numbers[i]
   500  	}
   501  	return p2p.Send(p.rw, NewBlockHashesMsg, request)
   502  }
   503  
   504  // AsyncSendNewBlockHash queues the availability of a block for propagation to a
   505  // remote peer. If the peer's broadcast queue is full, the event is silently
   506  // dropped.
   507  func (p *basePeer) AsyncSendNewBlockHash(block *types.Block) {
   508  	select {
   509  	case p.queuedAnns <- block:
   510  		p.AddToKnownBlocks(block.Hash())
   511  	default:
   512  		p.Log().Debug("Dropping block announcement", "number", block.NumberU64(), "hash", block.Hash())
   513  	}
   514  }
   515  
   516  // SendNewBlock propagates an entire block to a remote peer.
   517  func (p *basePeer) SendNewBlock(block *types.Block, td *big.Int) error {
   518  	p.AddToKnownBlocks(block.Hash())
   519  	return p2p.Send(p.rw, NewBlockMsg, []interface{}{block, td})
   520  }
   521  
   522  // AsyncSendNewBlock queues an entire block for propagation to a remote peer. If
   523  // the peer's broadcast queue is full, the event is silently dropped.
   524  func (p *basePeer) AsyncSendNewBlock(block *types.Block, td *big.Int) {
   525  	select {
   526  	case p.queuedProps <- &propEvent{block: block, td: td}:
   527  		p.AddToKnownBlocks(block.Hash())
   528  	default:
   529  		p.Log().Debug("Dropping block propagation", "number", block.NumberU64(), "hash", block.Hash())
   530  	}
   531  }
   532  
   533  // SendBlockHeaders sends a batch of block headers to the remote peer.
   534  func (p *basePeer) SendBlockHeaders(headers []*types.Header) error {
   535  	return p2p.Send(p.rw, BlockHeadersMsg, headers)
   536  }
   537  
   538  // SendFetchedBlockHeader sends a block header to the remote peer, requested by fetcher.
   539  func (p *basePeer) SendFetchedBlockHeader(header *types.Header) error {
   540  	return p2p.Send(p.rw, BlockHeaderFetchResponseMsg, header)
   541  }
   542  
   543  // SendBlockBodies sends a batch of block contents to the remote peer.
   544  func (p *basePeer) SendBlockBodies(bodies []*blockBody) error {
   545  	return p2p.Send(p.rw, BlockBodiesMsg, blockBodiesData(bodies))
   546  }
   547  
   548  // SendBlockBodiesRLP sends a batch of block contents to the remote peer from
   549  // an already RLP encoded format.
   550  func (p *basePeer) SendBlockBodiesRLP(bodies []rlp.RawValue) error {
   551  	return p2p.Send(p.rw, BlockBodiesMsg, bodies)
   552  }
   553  
   554  // SendFetchedBlockBodiesRLP sends a batch of block contents to the remote peer from
   555  // an already RLP encoded format.
   556  func (p *basePeer) SendFetchedBlockBodiesRLP(bodies []rlp.RawValue) error {
   557  	return p2p.Send(p.rw, BlockBodiesFetchResponseMsg, bodies)
   558  }
   559  
// SendNodeData sends a batch of arbitrary internal data, corresponding to the
// hashes requested.
func (p *basePeer) SendNodeData(data [][]byte) error {
	return p2p.Send(p.rw, NodeDataMsg, data)
}
   565  
   566  // SendReceiptsRLP sends a batch of transaction receipts, corresponding to the
   567  // ones requested from an already RLP encoded format.
   568  func (p *basePeer) SendReceiptsRLP(receipts []rlp.RawValue) error {
   569  	return p2p.Send(p.rw, ReceiptsMsg, receipts)
   570  }
   571  
   572  // SendStakingInfoRLP sends a batch of staking information, corresponding to the
   573  // ones requested from an already RLP encoded format.
   574  func (p *basePeer) SendStakingInfoRLP(stakingInfos []rlp.RawValue) error {
   575  	return p2p.Send(p.rw, StakingInfoMsg, stakingInfos)
   576  }
   577  
   578  // FetchBlockHeader is a wrapper around the header query functions to fetch a
   579  // single header. It is used solely by the fetcher.
   580  func (p *basePeer) FetchBlockHeader(hash common.Hash) error {
   581  	p.Log().Debug("Fetching a new block header", "hash", hash)
   582  	return p2p.Send(p.rw, BlockHeaderFetchRequestMsg, hash)
   583  }
   584  
   585  // RequestHeadersByHash fetches a batch of blocks' headers corresponding to the
   586  // specified header query, based on the hash of an origin block.
   587  func (p *basePeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {
   588  	p.Log().Debug("Fetching batch of headers", "count", amount, "fromhash", origin, "skip", skip, "reverse", reverse)
   589  	return p2p.Send(p.rw, BlockHeadersRequestMsg, &getBlockHeadersData{Origin: hashOrNumber{Hash: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse})
   590  }
   591  
   592  // RequestHeadersByNumber fetches a batch of blocks' headers corresponding to the
   593  // specified header query, based on the number of an origin block.
   594  func (p *basePeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
   595  	p.Log().Debug("Fetching batch of headers", "count", amount, "fromnum", origin, "skip", skip, "reverse", reverse)
   596  	return p2p.Send(p.rw, BlockHeadersRequestMsg, &getBlockHeadersData{Origin: hashOrNumber{Number: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse})
   597  }
   598  
   599  // RequestBodies fetches a batch of blocks' bodies corresponding to the hashes
   600  // specified.
   601  func (p *basePeer) RequestBodies(hashes []common.Hash) error {
   602  	p.Log().Debug("Fetching batch of block bodies", "count", len(hashes))
   603  	return p2p.Send(p.rw, BlockBodiesRequestMsg, hashes)
   604  }
   605  
   606  // FetchBlockBodies fetches a batch of blocks' bodies corresponding to the hashes
   607  // specified. If uses different message type from RequestBodies.
   608  func (p *basePeer) FetchBlockBodies(hashes []common.Hash) error {
   609  	p.Log().Debug("Fetching batch of new block bodies", "count", len(hashes))
   610  	return p2p.Send(p.rw, BlockBodiesFetchRequestMsg, hashes)
   611  }
   612  
   613  // RequestNodeData fetches a batch of arbitrary data from a node's known state
   614  // data, corresponding to the specified hashes.
   615  func (p *basePeer) RequestNodeData(hashes []common.Hash) error {
   616  	p.Log().Debug("Fetching batch of state data", "count", len(hashes))
   617  	return p2p.Send(p.rw, NodeDataRequestMsg, hashes)
   618  }
   619  
   620  // RequestReceipts fetches a batch of transaction receipts from a remote node.
   621  func (p *basePeer) RequestReceipts(hashes []common.Hash) error {
   622  	p.Log().Debug("Fetching batch of receipts", "count", len(hashes))
   623  	return p2p.Send(p.rw, ReceiptsRequestMsg, hashes)
   624  }
   625  
   626  // RequestStakingInfo fetches a batch of staking information from a remote node.
   627  func (p *basePeer) RequestStakingInfo(hashes []common.Hash) error {
   628  	p.Log().Debug("Fetching batch of staking infos", "count", len(hashes))
   629  	return p2p.Send(p.rw, StakingInfoRequestMsg, hashes)
   630  }
   631  
// Handshake executes the Klaytn protocol handshake, negotiating version number,
// network IDs, difficulties, head and genesis blocks. On success it records
// the remote peer's TD, head and chain ID on the basePeer.
func (p *basePeer) Handshake(network uint64, chainID, td *big.Int, head common.Hash, genesis common.Hash) error {
	// Send out own handshake in a new thread
	errc := make(chan error, 2) // buffered so neither goroutine blocks after a timeout
	var status statusData       // safe to read after two values have been received from errc

	go func() {
		// Push our own status message to the remote peer.
		errc <- p2p.Send(p.rw, StatusMsg, &statusData{
			ProtocolVersion: uint32(p.version),
			NetworkId:       network,
			TD:              td,
			CurrentBlock:    head,
			GenesisBlock:    genesis,
			ChainID:         chainID,
		})
	}()
	go func() {
		// Concurrently read and validate the remote peer's status.
		errc <- p.readStatus(network, &status, genesis, chainID)
	}()
	timeout := time.NewTimer(handshakeTimeout)
	defer timeout.Stop()
	// Wait for both the send and the receive halves (or the first failure).
	for i := 0; i < 2; i++ {
		select {
		case err := <-errc:
			if err != nil {
				return err
			}
		case <-timeout.C:
			return p2p.DiscReadTimeout
		}
	}
	// Both halves succeeded: adopt the remote peer's reported chain state.
	p.td, p.head, p.chainID = status.TD, status.CurrentBlock, status.ChainID
	return nil
}
   667  
// readStatus reads the remote peer's status message into status and validates
// it against our own network id, genesis hash, chain id and protocol version.
// Any mismatch is returned as an errResp and aborts the handshake.
func (p *basePeer) readStatus(network uint64, status *statusData, genesis common.Hash, chainID *big.Int) error {
	msg, err := p.rw.ReadMsg()
	if err != nil {
		return err
	}
	// The very first message must be a status message.
	if msg.Code != StatusMsg {
		return errResp(ErrNoStatusMsg, "first msg has code %x (!= %x)", msg.Code, StatusMsg)
	}
	if msg.Size > ProtocolMaxMsgSize {
		return errResp(ErrMsgTooLarge, "%v > %v", msg.Size, ProtocolMaxMsgSize)
	}
	// Decode the handshake and make sure everything matches
	if err := msg.Decode(&status); err != nil {
		return errResp(ErrDecode, "msg %v: %v", msg, err)
	}
	if status.GenesisBlock != genesis {
		return errResp(ErrGenesisBlockMismatch, "%x (!= %x)", status.GenesisBlock[:8], genesis[:8])
	}
	if status.NetworkId != network {
		return errResp(ErrNetworkIdMismatch, "%d (!= %d)", status.NetworkId, network)
	}
	if status.ChainID.Cmp(chainID) != 0 {
		return errResp(ErrChainIDMismatch, "%v (!= %v)", status.ChainID.String(), chainID.String())
	}
	if int(status.ProtocolVersion) != p.version {
		return errResp(ErrProtocolVersionMismatch, "%d (!= %d)", status.ProtocolVersion, p.version)
	}
	return nil
}
   697  
   698  // String implements fmt.Stringer.
   699  func (p *basePeer) String() string {
   700  	return fmt.Sprintf("Peer %s [%s]", p.id,
   701  		fmt.Sprintf("klay/%2d", p.version),
   702  	)
   703  }
   704  
// ConnType returns the conntype of the peer.
func (p *basePeer) ConnType() common.ConnType {
	return p.Peer.ConnType()
}

// GetID returns the id of the peer.
func (p *basePeer) GetID() string {
	return p.id
}

// GetP2PPeerID returns the id of the underlying p2p.Peer.
func (p *basePeer) GetP2PPeerID() discover.NodeID {
	return p.Peer.ID()
}

// GetChainID returns the chain id of the peer, as learned during Handshake.
func (p *basePeer) GetChainID() *big.Int {
	return p.chainID
}

// GetAddr returns the address of the peer.
func (p *basePeer) GetAddr() common.Address {
	return p.addr
}

// SetAddr sets the address of the peer.
func (p *basePeer) SetAddr(addr common.Address) {
	p.addr = addr
}

// GetVersion returns the negotiated protocol version of the peer.
func (p *basePeer) GetVersion() int {
	return p.version
}

// KnowsBlock returns if the peer is known to have the block, based on knownBlocksCache.
func (p *basePeer) KnowsBlock(hash common.Hash) bool {
	_, ok := p.knownBlocksCache.Get(hash)
	return ok
}

// KnowsTx returns if the peer is known to have the transaction, based on knownTxsCache.
func (p *basePeer) KnowsTx(hash common.Hash) bool {
	_, ok := p.knownTxsCache.Get(hash)
	return ok
}

// GetP2PPeer returns the underlying p2p.Peer.
func (p *basePeer) GetP2PPeer() *p2p.Peer {
	return p.Peer
}

// DisconnectP2PPeer disconnects the p2p peer with the given reason.
func (p *basePeer) DisconnectP2PPeer(discReason p2p.DiscReason) {
	p.GetP2PPeer().Disconnect(discReason)
}

// GetRW returns the MsgReadWriter of the peer.
func (p *basePeer) GetRW() p2p.MsgReadWriter {
	return p.rw
}
   766  
// Handle is the callback invoked to manage the life cycle of a Klaytn Peer. When
// this function terminates, the Peer is disconnected.
func (p *basePeer) Handle(pm *ProtocolManager) error {
	return pm.handle(p)
}

// UpdateRWImplementationVersion updates the version of the implementation of RW.
// It only takes effect when the rw is a meteredMsgReadWriter; otherwise it is a no-op.
func (p *basePeer) UpdateRWImplementationVersion() {
	if rw, ok := p.rw.(*meteredMsgReadWriter); ok {
		rw.Init(p.GetVersion())
	}
}

// RegisterConsensusMsgCode is not supported by this peer; it always returns an
// error built from errNotSupportedByBasePeer and the peer id.
func (p *basePeer) RegisterConsensusMsgCode(msgCode uint64) error {
	return fmt.Errorf("%v peerID: %v ", errNotSupportedByBasePeer, p.GetID())
}

// AddSnapExtension extends this peer to support snap protocol.
func (p *basePeer) AddSnapExtension(peer *snap.Peer) {
	p.snapExt = peer
}

// ExistSnapExtension returns true if this peer supports snap protocol.
func (p *basePeer) ExistSnapExtension() bool {
	return p.snapExt != nil
}
   794  
// singleChannelPeer is a peer that uses a single channel.
type singleChannelPeer struct {
	*basePeer
}

// multiChannelPeer is a peer that uses a multi channel.
type multiChannelPeer struct {
	*basePeer                     // basePeer is a set of data structures that the peer implementation has in common
	rws       []p2p.MsgReadWriter // rws is a slice of p2p.MsgReadWriter for peer-to-peer transmission and reception

	chMgr *ChannelManager // chMgr resolves message codes to per-connection channels
}
   807  
// RegisterMsgCode registers the channel id corresponding to msgCode.
func (p *multiChannelPeer) RegisterMsgCode(channelId uint, msgCode uint64) {
	p.chMgr.RegisterMsgCode(channelId, msgCode)
}

// RegisterConsensusMsgCode registers the channel of consensus msg.
// Unlike basePeer, the multichannel peer has a dedicated consensus channel,
// so registration always succeeds and nil is returned.
func (p *multiChannelPeer) RegisterConsensusMsgCode(msgCode uint64) error {
	p.chMgr.RegisterMsgCode(ConsensusChannel, msgCode)
	return nil
}
   818  
   819  // Broadcast is a write loop that multiplexes block propagations, announcements
   820  // and transaction broadcasts into the remote peer. The goal is to have an async
   821  // writer that does not lock up node internals.
   822  func (p *multiChannelPeer) Broadcast() {
   823  	for {
   824  		select {
   825  		case txs := <-p.queuedTxs:
   826  			if err := p.SendTransactions(txs); err != nil {
   827  				logger.Error("fail to SendTransactions", "peer", p.id, "err", err)
   828  				continue
   829  				// return
   830  			}
   831  			p.Log().Trace("Broadcast transactions", "peer", p.id, "count", len(txs))
   832  
   833  		case prop := <-p.queuedProps:
   834  			if err := p.SendNewBlock(prop.block, prop.td); err != nil {
   835  				logger.Error("fail to SendNewBlock", "peer", p.id, "err", err)
   836  				continue
   837  				// return
   838  			}
   839  			p.Log().Trace("Propagated block", "peer", p.id, "number", prop.block.Number(), "hash", prop.block.Hash(), "td", prop.td)
   840  
   841  		case block := <-p.queuedAnns:
   842  			if err := p.SendNewBlockHashes([]common.Hash{block.Hash()}, []uint64{block.NumberU64()}); err != nil {
   843  				logger.Error("fail to SendNewBlockHashes", "peer", p.id, "err", err)
   844  				continue
   845  				// return
   846  			}
   847  			p.Log().Trace("Announced block", "peer", p.id, "number", block.Number(), "hash", block.Hash())
   848  
   849  		case <-p.term:
   850  			p.Log().Debug("Peer broadcast loop end", "peer", p.id)
   851  			return
   852  		}
   853  	}
   854  }
   855  
   856  // SendTransactions sends transactions to the peer and includes the hashes
   857  // in its transaction hash set for future reference.
   858  func (p *multiChannelPeer) SendTransactions(txs types.Transactions) error {
   859  	// Before sending transactions, sort transactions in ascending order by time.
   860  	if !sort.IsSorted(types.TxByTime(txs)) {
   861  		sort.Sort(types.TxByTime(txs))
   862  	}
   863  
   864  	for _, tx := range txs {
   865  		p.AddToKnownTxs(tx.Hash())
   866  	}
   867  	return p.msgSender(TxMsg, txs)
   868  }
   869  
   870  // ReSendTransactions sends txs to a peer in order to prevent the txs from missing.
   871  func (p *multiChannelPeer) ReSendTransactions(txs types.Transactions) error {
   872  	// Before sending transactions, sort transactions in ascending order by time.
   873  	if !sort.IsSorted(types.TxByTime(txs)) {
   874  		sort.Sort(types.TxByTime(txs))
   875  	}
   876  
   877  	return p.msgSender(TxMsg, txs)
   878  }
   879  
   880  // SendNewBlockHashes announces the availability of a number of blocks through
   881  // a hash notification.
   882  func (p *multiChannelPeer) SendNewBlockHashes(hashes []common.Hash, numbers []uint64) error {
   883  	for _, hash := range hashes {
   884  		p.AddToKnownBlocks(hash)
   885  	}
   886  	request := make(newBlockHashesData, len(hashes))
   887  	for i := 0; i < len(hashes); i++ {
   888  		request[i].Hash = hashes[i]
   889  		request[i].Number = numbers[i]
   890  	}
   891  	return p.msgSender(NewBlockHashesMsg, request)
   892  }
   893  
// SendNewBlock propagates an entire block to a remote peer.
func (p *multiChannelPeer) SendNewBlock(block *types.Block, td *big.Int) error {
	p.AddToKnownBlocks(block.Hash())
	return p.msgSender(NewBlockMsg, []interface{}{block, td})
}

// SendBlockHeaders sends a batch of block headers to the remote peer.
func (p *multiChannelPeer) SendBlockHeaders(headers []*types.Header) error {
	return p.msgSender(BlockHeadersMsg, headers)
}

// SendFetchedBlockHeader sends a block header to the remote peer, requested by fetcher.
func (p *multiChannelPeer) SendFetchedBlockHeader(header *types.Header) error {
	return p.msgSender(BlockHeaderFetchResponseMsg, header)
}

// SendBlockBodies sends a batch of block contents to the remote peer.
func (p *multiChannelPeer) SendBlockBodies(bodies []*blockBody) error {
	return p.msgSender(BlockBodiesMsg, blockBodiesData(bodies))
}

// SendBlockBodiesRLP sends a batch of block contents to the remote peer from
// an already RLP encoded format.
func (p *multiChannelPeer) SendBlockBodiesRLP(bodies []rlp.RawValue) error {
	return p.msgSender(BlockBodiesMsg, bodies)
}

// SendFetchedBlockBodiesRLP sends a batch of block contents to the remote peer from
// an already RLP encoded format.
func (p *multiChannelPeer) SendFetchedBlockBodiesRLP(bodies []rlp.RawValue) error {
	return p.msgSender(BlockBodiesFetchResponseMsg, bodies)
}

// SendNodeData sends a batch of arbitrary internal data, corresponding to the
// hashes requested. (The comment previously misnamed this SendNodeDataRLP.)
func (p *multiChannelPeer) SendNodeData(data [][]byte) error {
	return p.msgSender(NodeDataMsg, data)
}

// SendReceiptsRLP sends a batch of transaction receipts, corresponding to the
// ones requested from an already RLP encoded format.
func (p *multiChannelPeer) SendReceiptsRLP(receipts []rlp.RawValue) error {
	return p.msgSender(ReceiptsMsg, receipts)
}

// SendStakingInfoRLP sends a batch of staking information, corresponding to the
// ones requested from an already RLP encoded format.
func (p *multiChannelPeer) SendStakingInfoRLP(stakingInfos []rlp.RawValue) error {
	return p.msgSender(StakingInfoMsg, stakingInfos)
}
   944  
// FetchBlockHeader is a wrapper around the header query functions to fetch a
// single header. It is used solely by the fetcher.
func (p *multiChannelPeer) FetchBlockHeader(hash common.Hash) error {
	p.Log().Debug("Fetching a new block header", "hash", hash)
	return p.msgSender(BlockHeaderFetchRequestMsg, hash)
}

// RequestHeadersByHash fetches a batch of blocks' headers corresponding to the
// specified header query, based on the hash of an origin block.
func (p *multiChannelPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {
	p.Log().Debug("Fetching batch of headers", "count", amount, "fromhash", origin, "skip", skip, "reverse", reverse)
	return p.msgSender(BlockHeadersRequestMsg, &getBlockHeadersData{Origin: hashOrNumber{Hash: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse})
}

// RequestHeadersByNumber fetches a batch of blocks' headers corresponding to the
// specified header query, based on the number of an origin block.
func (p *multiChannelPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
	p.Log().Debug("Fetching batch of headers", "count", amount, "fromnum", origin, "skip", skip, "reverse", reverse)
	return p.msgSender(BlockHeadersRequestMsg, &getBlockHeadersData{Origin: hashOrNumber{Number: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse})
}

// RequestBodies fetches a batch of blocks' bodies corresponding to the hashes
// specified. It is used by the downloader.
func (p *multiChannelPeer) RequestBodies(hashes []common.Hash) error {
	p.Log().Debug("Fetching batch of block bodies", "count", len(hashes))
	return p.msgSender(BlockBodiesRequestMsg, hashes)
}

// FetchBlockBodies fetches a batch of blocks' bodies corresponding to the hashes
// specified. It is used by the fetcher.
func (p *multiChannelPeer) FetchBlockBodies(hashes []common.Hash) error {
	p.Log().Debug("Fetching batch of new block bodies", "count", len(hashes))
	return p.msgSender(BlockBodiesFetchRequestMsg, hashes)
}

// RequestNodeData fetches a batch of arbitrary data from a node's known state
// data, corresponding to the specified hashes.
func (p *multiChannelPeer) RequestNodeData(hashes []common.Hash) error {
	p.Log().Debug("Fetching batch of state data", "count", len(hashes))
	return p.msgSender(NodeDataRequestMsg, hashes)
}

// RequestReceipts fetches a batch of transaction receipts from a remote node.
func (p *multiChannelPeer) RequestReceipts(hashes []common.Hash) error {
	p.Log().Debug("Fetching batch of receipts", "count", len(hashes))
	return p.msgSender(ReceiptsRequestMsg, hashes)
}

// RequestStakingInfo fetches a batch of staking information from a remote node,
// keyed by the given block hashes.
func (p *multiChannelPeer) RequestStakingInfo(hashes []common.Hash) error {
	p.Log().Debug("Fetching batch of staking infos", "count", len(hashes))
	return p.msgSender(StakingInfoRequestMsg, hashes)
}
   998  
   999  // msgSender sends data to the peer.
  1000  func (p *multiChannelPeer) msgSender(msgcode uint64, data interface{}) error {
  1001  	if ch, ok := ChannelOfMessage[msgcode]; ok && len(p.rws) > ch {
  1002  		return p2p.Send(p.rws[ch], msgcode, data)
  1003  	} else {
  1004  		return errors.New("RW not found for message")
  1005  	}
  1006  }
  1007  
// GetRW returns the MsgReadWriter of the peer.
// NOTE(review): a multichannel peer owns several rws; this returns only the
// base rw inherited from basePeer, not any entry of p.rws.
func (p *multiChannelPeer) GetRW() p2p.MsgReadWriter {
	return p.rw // TODO-Klaytn check this function usage
}

// UpdateRWImplementationVersion updates the version of the implementation of RW.
// Every metered per-channel rw is initialized with the negotiated protocol
// version, then the base rw is updated via basePeer.
func (p *multiChannelPeer) UpdateRWImplementationVersion() {
	for _, rw := range p.rws {
		if rw, ok := rw.(*meteredMsgReadWriter); ok {
			rw.Init(p.GetVersion())
		}
	}
	p.basePeer.UpdateRWImplementationVersion()
}
  1022  
// ReadMsg reads messages from one of the peer's per-channel MsgReadWriters and
// dispatches each message to the channel registered for its msg code on the
// connection identified by connectionOrder. It loops until a read/dispatch
// error occurs (reported on errCh) or until closed is closed, and signals wg
// on exit so the caller can wait for all readers to stop.
func (p *multiChannelPeer) ReadMsg(rw p2p.MsgReadWriter, connectionOrder int, errCh chan<- error, wg *sync.WaitGroup, closed <-chan struct{}) {
	defer wg.Done()

	// A helper goroutine performs the blocking rw.ReadMsg calls and forwards
	// (msg, err) pairs, so this loop can still observe `closed` and return
	// promptly even while a read is in flight.
	readMsgCh := make(chan struct {
		p2p.Msg
		error
	}, channelSizePerPeer)
	go func() {
		for {
			// TODO-klaytn: check 30-second timeout works
			msg, err := rw.ReadMsg()
			select {
			case <-closed:
				return
			case readMsgCh <- struct {
				p2p.Msg
				error
			}{msg, err}:
			}
		}
	}()

	for {
		var (
			msg p2p.Msg
			err error
		)
		// Wait for either the next (msg, err) pair or shutdown.
		select {
		case pair := <-readMsgCh:
			msg, err = pair.Msg, pair.error
		case <-closed:
			return
		}

		if err != nil {
			p.GetP2PPeer().Log().Warn("ProtocolManager failed to read msg", "err", err)
			errCh <- err
			return
		}
		// Resolve the processing channel for this msg code on this connection.
		msgCh, err := p.chMgr.GetChannelWithMsgCode(connectionOrder, msg.Code)
		if err != nil {
			p.GetP2PPeer().Log().Warn("ProtocolManager failed to get msg channel", "err", err)
			errCh <- err
			return
		}
		if msg.Size > ProtocolMaxMsgSize {
			err = errResp(ErrMsgTooLarge, "%v > %v", msg.Size, ProtocolMaxMsgSize)
			p.GetP2PPeer().Log().Warn("ProtocolManager over max msg size", "err", err)
			errCh <- err
			return
		}
		// Hand the message to the processing goroutines, unless shutting down.
		select {
		case msgCh <- msg:
		case <-closed:
			return
		}
	}
}
  1081  
  1082  // Handle is the callback invoked to manage the life cycle of a Klaytn Peer. When
  1083  // this function terminates, the Peer is disconnected.
  1084  func (p *multiChannelPeer) Handle(pm *ProtocolManager) error {
  1085  	// If the peer has a `snap` extension, wait for it to connect so we can have
  1086  	// a uniform initialization/teardown mechanism
  1087  	snap, err := pm.peers.WaitSnapExtension(p)
  1088  	if err != nil {
  1089  		p.GetP2PPeer().Log().Error("Snapshot extension barrier failed", "err", err)
  1090  		return err
  1091  	}
  1092  
  1093  	// Ignore maxPeers if this is a trusted peer
  1094  	if pm.peers.Len() >= pm.maxPeers && !p.GetP2PPeer().Info().Networks[p2p.ConnDefault].Trusted {
  1095  		return p2p.DiscTooManyPeers
  1096  	}
  1097  	p.GetP2PPeer().Log().Debug("Klaytn peer connected", "name", p.GetP2PPeer().Name())
  1098  
  1099  	pm.peerWg.Add(1)
  1100  	defer pm.peerWg.Done()
  1101  
  1102  	// Execute the handshake
  1103  	var (
  1104  		genesis = pm.blockchain.Genesis()
  1105  		head    = pm.blockchain.CurrentHeader()
  1106  		hash    = head.Hash()
  1107  		number  = head.Number.Uint64()
  1108  		td      = pm.blockchain.GetTd(hash, number)
  1109  	)
  1110  
  1111  	if err := p.Handshake(pm.networkId, pm.getChainID(), td, hash, genesis.Hash()); err != nil {
  1112  		p.GetP2PPeer().Log().Debug("Klaytn peer handshake failed", "err", err)
  1113  		return err
  1114  	}
  1115  	reject := false
  1116  	if atomic.LoadUint32(&pm.snapSync) == 1 {
  1117  		if snap == nil {
  1118  			// If we are running snap-sync, we want to reserve roughly half the peer
  1119  			// slots for peers supporting the snap protocol.
  1120  			// The logic here is; we only allow up to 5 more non-snap peers than snap-peers.
  1121  			if all, snp := pm.peers.Len(), pm.peers.SnapLen(); all-snp > snp+5 {
  1122  				reject = true
  1123  			}
  1124  		}
  1125  	}
  1126  	// Ignore maxPeers if this is a trusted peer
  1127  	if p.GetP2PPeer().Info().Networks[p2p.ConnDefault].Trusted {
  1128  		if reject || pm.peers.Len() >= pm.maxPeers {
  1129  			return p2p.DiscTooManyPeers
  1130  		}
  1131  	}
  1132  
  1133  	p.UpdateRWImplementationVersion()
  1134  
  1135  	// Register the peer locally
  1136  	if err := pm.peers.Register(p, snap); err != nil {
  1137  		// if starting node with unlock account, can't register peer until finish unlock
  1138  		p.GetP2PPeer().Log().Info("Klaytn peer registration failed", "err", err)
  1139  		return err
  1140  	}
  1141  	defer pm.removePeer(p.GetID())
  1142  
  1143  	// Register the peer in the downloader. If the downloader considers it banned, we disconnect
  1144  	if err := pm.downloader.RegisterPeer(p.GetID(), p.GetVersion(), p); err != nil {
  1145  		return err
  1146  	}
  1147  	if snap != nil {
  1148  		if err := pm.downloader.GetSnapSyncer().Register(snap); err != nil {
  1149  			p.GetP2PPeer().Log().Info("Failed to register peer in snap syncer", "err", err)
  1150  			return err
  1151  		}
  1152  	}
  1153  
  1154  	// Propagate existing transactions. new transactions appearing
  1155  	// after this will be sent via broadcasts.
  1156  	pm.syncTransactions(p)
  1157  
  1158  	p.GetP2PPeer().Log().Info("Added a multichannel P2P Peer", "peerID", p.GetP2PPeerID())
  1159  
  1160  	pubKey, err := p.GetP2PPeerID().Pubkey()
  1161  	if err != nil {
  1162  		return err
  1163  	}
  1164  	addr := crypto.PubkeyToAddress(*pubKey)
  1165  	lenRWs := len(p.rws)
  1166  
  1167  	var wg sync.WaitGroup
  1168  	// TODO-GX check global worker and peer worker
  1169  	messageChannels := make([]chan p2p.Msg, 0, lenRWs)
  1170  	var consensusChannel chan p2p.Msg
  1171  	isCN := false
  1172  
  1173  	if _, ok := pm.engine.(consensus.Handler); ok && pm.nodetype == common.CONSENSUSNODE {
  1174  		consensusChannel = make(chan p2p.Msg, channelSizePerPeer)
  1175  		defer close(consensusChannel)
  1176  		pm.engine.(consensus.Handler).RegisterConsensusMsgCode(p)
  1177  		isCN = true
  1178  	}
  1179  
  1180  	for idx := range p.rws {
  1181  		channel := make(chan p2p.Msg, channelSizePerPeer)
  1182  		defer close(channel)
  1183  		messageChannels = append(messageChannels, channel)
  1184  
  1185  		p.chMgr.RegisterChannelWithIndex(idx, BlockChannel, channel)
  1186  		p.chMgr.RegisterChannelWithIndex(idx, TxChannel, channel)
  1187  		p.chMgr.RegisterChannelWithIndex(idx, MiscChannel, channel)
  1188  
  1189  		if isCN {
  1190  			p.chMgr.RegisterChannelWithIndex(idx, ConsensusChannel, consensusChannel)
  1191  		}
  1192  	}
  1193  
  1194  	sumOfGoroutineForProcessMessage := 1 // 1 is for consensusChannel
  1195  	for connIdx := range messageChannels {
  1196  		sumOfGoroutineForProcessMessage += ConcurrentOfChannel[connIdx]
  1197  	}
  1198  	errChannel := make(chan error, lenRWs+sumOfGoroutineForProcessMessage) // errChannel size should be set to count of goroutine use errChannel
  1199  	closed := make(chan struct{})
  1200  
  1201  	if isCN {
  1202  		go pm.processConsensusMsg(consensusChannel, p, addr, errChannel)
  1203  	}
  1204  
  1205  	for connIdx, messageChannel := range messageChannels {
  1206  		for i := 0; i < ConcurrentOfChannel[connIdx]; i++ {
  1207  			go pm.processMsg(messageChannel, p, addr, errChannel)
  1208  		}
  1209  	}
  1210  
  1211  	for idx, rw := range p.rws {
  1212  		wg.Add(1)
  1213  		go p.ReadMsg(rw, idx, errChannel, &wg, closed)
  1214  	}
  1215  
  1216  	err = <-errChannel
  1217  	close(closed)
  1218  	wg.Wait()
  1219  	p.GetP2PPeer().Log().Info("Disconnected a multichannel P2P Peer", "peerID", p.GetP2PPeerID(), "peerName", p.GetP2PPeer().Name(), "err", err)
  1220  	return err
  1221  }
  1222  
// ByPassValidator is a no-op peer-type validator that accepts every address.
type ByPassValidator struct{}

// ValidatePeerType always returns nil, allowing a peer of any address.
func (v ByPassValidator) ValidatePeerType(addr common.Address) error {
	return nil
}