github.com/aergoio/aergo@v1.3.1/p2p/peermanager.go (about)

     1  /* @file @copyright defined in aergo/LICENSE.txt */
     2  
     3  package p2p
     4  
     5  import (
     6  	"github.com/aergoio/aergo/p2p/p2pkey"
     7  	"net"
     8  	"strconv"
     9  	"sync"
    10  	"sync/atomic"
    11  	"time"
    12  
    13  	"github.com/aergoio/aergo-lib/log"
    14  	"github.com/aergoio/aergo/message"
    15  	"github.com/aergoio/aergo/p2p/metric"
    16  	"github.com/aergoio/aergo/p2p/p2pcommon"
    17  	"github.com/aergoio/aergo/p2p/p2putil"
    18  	"github.com/aergoio/aergo/types"
    19  
    20  	cfg "github.com/aergoio/aergo/config"
    21  	"github.com/libp2p/go-libp2p-core/protocol"
    22  )
    23  
// Lifecycle states held in peerManager.status (manipulated via sync/atomic).
const (
	initial  = iota // constructed, Start() not yet called
	running         // runManagePeers loop is active
	stopping        // Stop() requested, shutdown in progress
	stopped         // manage loop exited and cleanup finished
)
    30  
    31  /**
    32   * peerManager connect to and listen from other nodes.
    33   * It implements  Component interface
    34   */
    35  type peerManager struct {
    36  	status            int32
    37  	is                p2pcommon.InternalService
    38  	nt                p2pcommon.NetworkTransport
    39  	hsFactory         p2pcommon.HSHandlerFactory
    40  	actorService      p2pcommon.ActorService
    41  	peerFactory       p2pcommon.PeerFactory
    42  	mm                metric.MetricsManager
    43  	lm                p2pcommon.ListManager
    44  	skipHandshakeSync bool
    45  
    46  	peerFinder p2pcommon.PeerFinder
    47  	wpManager  p2pcommon.WaitingPeerManager
    48  	// designatedPeers and hiddenPeerSet is set in construction time once and will not be changed
    49  	hiddenPeerSet map[types.PeerID]bool
    50  
    51  	mutex        *sync.Mutex
    52  	manageNumber uint32
    53  	remotePeers  map[types.PeerID]p2pcommon.RemotePeer
    54  	waitingPeers map[types.PeerID]*p2pcommon.WaitingPeer
    55  
    56  	conf *cfg.P2PConfig
    57  	// peerCache is copy-on-write style
    58  	peerCache []p2pcommon.RemotePeer
    59  
    60  	getPeerChannel    chan getPeerTask
    61  	peerHandshaked    chan handshakeResult
    62  	removePeerChannel chan p2pcommon.RemotePeer
    63  	fillPoolChannel   chan []p2pcommon.PeerMeta
    64  	addPeerChannel    chan p2pcommon.PeerMeta
    65  	inboundConnChan   chan inboundConnEvent
    66  	workDoneChannel   chan p2pcommon.ConnWorkResult
    67  	taskChannel       chan pmTask
    68  	finishChannel     chan struct{}
    69  
    70  	eventListeners []p2pcommon.PeerEventListener
    71  
    72  	//
    73  	designatedPeers map[types.PeerID]p2pcommon.PeerMeta
    74  
    75  	logger *log.Logger
    76  }
    77  
// getPeerTask is struct to get peer for concurrent use.
// The manage goroutine answers on ret with the peer or nil if unknown.
type getPeerTask struct {
	id  types.PeerID
	ret chan p2pcommon.RemotePeer
}
    83  
    84  var _ p2pcommon.PeerManager = (*peerManager)(nil)
    85  
    86  // NewPeerManager creates a peer manager object.
    87  func NewPeerManager(is p2pcommon.InternalService, hsFactory p2pcommon.HSHandlerFactory, actor p2pcommon.ActorService, pf p2pcommon.PeerFactory, nt p2pcommon.NetworkTransport, mm metric.MetricsManager, lm p2pcommon.ListManager, logger *log.Logger, cfg *cfg.Config, skipHandshakeSync bool) p2pcommon.PeerManager {
    88  	p2pConf := cfg.P2P
    89  	//logger.SetLevel("debug")
    90  	pm := &peerManager{
    91  		is:                is,
    92  		nt:                nt,
    93  		hsFactory:         hsFactory,
    94  		actorService:      actor,
    95  		conf:              p2pConf,
    96  		peerFactory:       pf,
    97  		mm:                mm,
    98  		lm:                lm,
    99  		logger:            logger,
   100  		mutex:             &sync.Mutex{},
   101  		skipHandshakeSync: skipHandshakeSync,
   102  
   103  		status:          initial,
   104  		designatedPeers: make(map[types.PeerID]p2pcommon.PeerMeta, len(cfg.P2P.NPAddPeers)),
   105  		hiddenPeerSet:   make(map[types.PeerID]bool, len(cfg.P2P.NPHiddenPeers)),
   106  
   107  		remotePeers: make(map[types.PeerID]p2pcommon.RemotePeer, p2pConf.NPMaxPeers),
   108  
   109  		waitingPeers: make(map[types.PeerID]*p2pcommon.WaitingPeer, p2pConf.NPPeerPool),
   110  
   111  		peerCache: make([]p2pcommon.RemotePeer, 0, p2pConf.NPMaxPeers),
   112  
   113  		getPeerChannel:    make(chan getPeerTask),
   114  		peerHandshaked:    make(chan handshakeResult),
   115  		removePeerChannel: make(chan p2pcommon.RemotePeer),
   116  		fillPoolChannel:   make(chan []p2pcommon.PeerMeta, 2),
   117  		addPeerChannel:    make(chan p2pcommon.PeerMeta),
   118  		inboundConnChan:   make(chan inboundConnEvent),
   119  		workDoneChannel:   make(chan p2pcommon.ConnWorkResult),
   120  		eventListeners:    make([]p2pcommon.PeerEventListener, 0, 4),
   121  		taskChannel:       make(chan pmTask, 4),
   122  		finishChannel:     make(chan struct{}),
   123  	}
   124  
   125  	// additional initializations
   126  	pm.init()
   127  
   128  	return pm
   129  }
   130  
// SelfMeta returns the local node's peer meta as reported by the network transport.
func (pm *peerManager) SelfMeta() p2pcommon.PeerMeta {
	return pm.nt.SelfMeta()
}
// SelfNodeID returns the peer ID of the local node.
func (pm *peerManager) SelfNodeID() types.PeerID {
	return p2pkey.NodeID()
}
   137  
// init finishes construction-time setup: it loads the designated peer list,
// builds the hidden peer set, creates the peer finder and the waiting-peer
// manager, and seeds the waiting pool with every designated peer.
// It panics on an undecodable peer ID in NPHiddenPeers, since that is a
// configuration error the node cannot run with.
func (pm *peerManager) init() {
	// set designated peers
	pm.initDesignatedPeerList()
	// init hidden peers
	for _, pidStr := range pm.conf.NPHiddenPeers {
		pid, err := types.IDB58Decode(pidStr)
		if err != nil {
			panic("Invalid pid in NPHiddenPeers : " + pidStr + " err " + err.Error())
		}
		pm.hiddenPeerSet[pid] = true
	}

	pm.peerFinder = NewPeerFinder(pm.logger, pm, pm.actorService, pm.conf.NPPeerPool, pm.conf.NPDiscoverPeers, pm.conf.NPUsePolaris)
	pm.wpManager = NewWaitingPeerManager(pm.logger, pm, pm.lm, pm.conf.NPPeerPool, pm.conf.NPDiscoverPeers)
	// add designated peers to waiting pool at initial time.
	for _, meta := range pm.designatedPeers {
		if _, foundInWait := pm.waitingPeers[meta.ID]; !foundInWait {
			// NextTrial of now makes the first connection attempt immediate
			pm.waitingPeers[meta.ID] = &p2pcommon.WaitingPeer{Meta: meta, NextTrial: time.Now()}
		}
	}
}
   159  
// AddPeerEventListener registers a listener to be notified of peer
// connect/disconnect events. The mutex guards eventListeners, which is also
// iterated by tryRegister and removePeer.
func (pm *peerManager) AddPeerEventListener(l p2pcommon.PeerEventListener) {
	pm.mutex.Lock()
	defer pm.mutex.Unlock()
	pm.eventListeners = append(pm.eventListeners, l)
}
   165  
// Start launches the manage goroutine and returns immediately; it never
// returns an error (the error return satisfies the component interface).
func (pm *peerManager) Start() error {
	go pm.runManagePeers()

	return nil
}
   171  
// Stop requests termination of the manage loop. When the manager is running
// it signals finishChannel; runManagePeers then performs cleanup and finally
// sets status to stopped.
// NOTE(review): in the non-running branch the status is swapped to stopping
// and restored to stopped only if it already was stopped. Calling Stop()
// before Start() therefore leaves status at stopping — presumably intended to
// block a late Start from running, but worth confirming.
func (pm *peerManager) Stop() error {
	if atomic.CompareAndSwapInt32(&pm.status, running, stopping) {
		pm.finishChannel <- struct{}{}
	} else {
		// leave stopped if already stopped
		if atomic.SwapInt32(&pm.status, stopping) == stopped {
			atomic.StoreInt32(&pm.status, stopped)
		}
	}
	return nil
}
   183  
   184  func (pm *peerManager) initDesignatedPeerList() {
   185  	// add remote node from config
   186  	for _, target := range pm.conf.NPAddPeers {
   187  		peerMeta, err := p2putil.FromMultiAddrString(target)
   188  		if err != nil {
   189  			pm.logger.Warn().Err(err).Str("str", target).Msg("invalid NPAddPeer address")
   190  			continue
   191  		}
   192  		peerMeta.Designated = true
   193  		peerMeta.Outbound = true
   194  		pm.logger.Info().Str(p2putil.LogFullID, peerMeta.ID.Pretty()).Str(p2putil.LogPeerID, p2putil.ShortForm(peerMeta.ID)).Str("addr", peerMeta.IPAddress).Uint32("port", peerMeta.Port).Msg("Adding Designated peer")
   195  		pm.designatedPeers[peerMeta.ID] = peerMeta
   196  	}
   197  }
   198  
// runManagePeers is the manager's single event-loop goroutine. It owns
// remotePeers and waitingPeers, and serializes all peer lifecycle events
// (handshake results, removals, inbound connections, discovery results and
// ad-hoc tasks) received over the manager's channels. It runs until Stop()
// signals finishChannel, then stops accepting connections, asks every cached
// peer to stop, and waits (bounded by a 30s timeout) for them to finish.
func (pm *peerManager) runManagePeers() {

	pm.logger.Info().Str("p2p_proto", p2putil.ProtocolIDsToString([]protocol.ID{p2pcommon.P2PSubAddr, p2pcommon.LegacyP2PSubAddr})).Msg("Starting p2p listening")
	pm.nt.AddStreamHandler(p2pcommon.LegacyP2PSubAddr, pm.wpManager.OnInboundConnLegacy)
	pm.nt.AddStreamHandler(p2pcommon.P2PSubAddr, pm.wpManager.OnInboundConn)

	if !atomic.CompareAndSwapInt32(&pm.status, initial, running) {
		panic("wrong internal status")
	}
	// instantStart is a tiny delay used to re-fire a timer "immediately"
	instantStart := time.Millisecond << 4
	initialAddrDelay := time.Second * 2
	finderTimer := time.NewTimer(initialAddrDelay)
	connManTimer := time.NewTimer(initialAddrDelay << 1)

MANLOOP:
	for {
		select {
		case req := <-pm.getPeerChannel:
			// serve GetPeer lookups from the goroutine that owns remotePeers
			peer, exist := pm.remotePeers[req.id]
			if exist {
				req.ret <- peer
			} else {
				req.ret <- nil
			}
		case hsreslt := <-pm.peerHandshaked:
			if peer := pm.tryRegister(hsreslt); peer != nil {
				pm.peerFinder.OnPeerConnect(peer.ID())
				pm.wpManager.OnPeerConnect(peer.ID())

				pm.checkSync(peer)

				// query other peers
				// NOTE(review): this Stop/drain idiom blocks on <-finderTimer.C
				// if the timer fired and its value was consumed earlier in this
				// loop; here each firing is followed by an immediate Reset, which
				// presumably keeps that from happening — verify on change.
				if !finderTimer.Stop() {
					<-finderTimer.C
				}
				finderTimer.Reset(instantStart)
			}
		case peer := <-pm.removePeerChannel:
			if pm.removePeer(peer) {
				pm.peerFinder.OnPeerDisconnect(peer)
				pm.wpManager.OnPeerDisconnect(peer)
			}
			// wake the connection manager soon to refill the lost slot
			if !connManTimer.Stop() {
				<-connManTimer.C
			}
			connManTimer.Reset(instantStart)
		case inInfo := <-pm.inboundConnChan:
			// answer "is this peer already connected?" for inbound handshakes
			id := inInfo.meta.ID
			if _, found := pm.remotePeers[id]; found {
				inInfo.foundC <- true
			} else {
				inInfo.foundC <- false
			}
		case workResult := <-pm.workDoneChannel:
			pm.wpManager.OnWorkDone(workResult)
			// Retry
			if !connManTimer.Stop() {
				<-connManTimer.C
			}
			connManTimer.Reset(instantStart)
		case <-finderTimer.C:
			// periodic peer discovery
			pm.peerFinder.CheckAndFill()
			finderTimer.Reset(DiscoveryQueryInterval)
		case <-connManTimer.C:
			pm.wpManager.CheckAndConnect()
			// fire at next interval
			connManTimer.Reset(p2pcommon.WaitingPeerManagerInterval)
			//connManTimer.Reset(time.Second*5)
		case peerMeta := <-pm.addPeerChannel:
			pm.wpManager.InstantConnect(peerMeta)
		case peerMetas := <-pm.fillPoolChannel:
			// newly discovered addresses; connect soon if any were accepted
			if pm.wpManager.OnDiscoveredPeers(peerMetas) > 0 {
				if !connManTimer.Stop() {
					<-connManTimer.C
				}
				connManTimer.Reset(instantStart)
			}
		case task := <-pm.taskChannel:
			// run an arbitrary short task inside this goroutine
			task()
		case <-pm.finishChannel:
			finderTimer.Stop()
			connManTimer.Stop()
			break MANLOOP
		}
	}
	// guaranty no new peer connection will be made
	pm.nt.RemoveStreamHandler(p2pcommon.LegacyP2PSubAddr)
	pm.nt.RemoveStreamHandler(p2pcommon.P2PSubAddr)

	pm.logger.Info().Msg("Finishing peerManager")

	go func() {
		// closing all peer connections
		for _, peer := range pm.peerCache {
			peer.Stop()
		}
	}()
	// poll until all peers have unregistered themselves, or give up after 30s
	timer := time.NewTimer(time.Second * 30)
	finishPoll := time.NewTicker(time.Millisecond << 6)
CLEANUPLOOP:
	for {
		select {
		case req := <-pm.getPeerChannel:
			// lookups during shutdown always answer "not found"
			req.ret <- nil
		case peer := <-pm.removePeerChannel:
			pm.removePeer(peer)
		case <-finishPoll.C:
			if len(pm.remotePeers) == 0 {
				pm.logger.Debug().Msg("All peers were finished peerManager")
				break CLEANUPLOOP
			}
		case <-timer.C:
			pm.logger.Warn().Int("remained", len(pm.peerCache)).Msg("peermanager stop timeout. some peers were not finished.")
			break CLEANUPLOOP
		}
	}
	atomic.StoreInt32(&pm.status, stopped)
}
   317  
// tryRegister registers the handshake result as a managed remote peer and
// returns the new peer, or nil when the connection should be dropped.
// When a peer with the same ID is already registered (both sides dialed each
// other simultaneously), exactly one connection survives: the local and
// remote peer IDs are compared, and the tie is broken so both nodes agree on
// which connection to keep. After registration the peer's goroutine is
// started and all event listeners are notified.
func (pm *peerManager) tryRegister(hsresult handshakeResult) p2pcommon.RemotePeer {
	meta := hsresult.meta
	peerID := meta.ID
	preExistPeer, ok := pm.remotePeers[peerID]
	if ok {
		pm.logger.Info().Str(p2putil.LogPeerID, p2putil.ShortForm(peerID)).Msg("Peer add collision. Outbound connection of higher hash will survive.")
		iAmLowerOrEqual := p2putil.ComparePeerID(pm.is.SelfNodeID(), meta.ID) <= 0
		if iAmLowerOrEqual == meta.Outbound {
			// the earlier connection wins; drop this one
			pm.logger.Info().Str("local_peer_id", p2putil.ShortForm(pm.is.SelfNodeID())).Str(p2putil.LogPeerID, p2putil.ShortForm(peerID)).Bool("outbound", meta.Outbound).Msg("Close connection and keep earlier handshake connection.")
			return nil
		} else {
			pm.logger.Info().Str("local_peer_id", p2putil.ShortForm(pm.is.SelfNodeID())).Str(p2putil.LogPeerID, p2putil.ShortForm(peerID)).Bool("outbound", meta.Outbound).Msg("Keep connection and close earlier handshake connection.")
			// stopping lower valued connection
			preExistPeer.Stop()
		}
	}

	// apply local configuration (designated/hidden) to the peer's meta
	meta = pm.changePeerAttributes(meta, peerID)
	newPeer := pm.peerFactory.CreateRemotePeer(meta, pm.GetNextManageNum(), hsresult.status, hsresult.s, hsresult.msgRW)

	go newPeer.RunPeer()

	pm.insertPeer(peerID, newPeer)
	pm.logger.Info().Str("role", newPeer.Role().String()).Bool("outbound", meta.Outbound).Str(p2putil.LogPeerName, newPeer.Name()).Str("addr", net.ParseIP(meta.IPAddress).String()+":"+strconv.Itoa(int(meta.Port))).Msg("peer is added to peerService")

	// notify listeners under the mutex that guards eventListeners
	pm.mutex.Lock()
	defer pm.mutex.Unlock()
	for _, listener := range pm.eventListeners {
		listener.OnPeerConnect(peerID)
	}

	return newPeer
}
   352  
   353  func (pm *peerManager) changePeerAttributes(meta p2pcommon.PeerMeta, peerID types.PeerID) (p2pcommon.PeerMeta) {
   354  	// override options by configurations of node
   355  	_, meta.Designated = pm.designatedPeers[peerID]
   356  	// hidden is set by either remote peer's asking or local node's config
   357  	if _, exist := pm.hiddenPeerSet[peerID]; exist {
   358  		meta.Hidden = true
   359  	}
   360  
   361  	return meta
   362  }
   363  
// GetNextManageNum atomically issues the next manage number, used to tell
// apart successive peer instances that share the same peer ID.
func (pm *peerManager) GetNextManageNum() uint32 {
	return atomic.AddUint32(&pm.manageNumber, 1)
}
   367  
// AddNewPeer asks the manage goroutine to connect to the given peer
// immediately. It blocks until the manage loop receives the request.
func (pm *peerManager) AddNewPeer(meta p2pcommon.PeerMeta) {
	pm.addPeerChannel <- meta
}
   371  
// RemovePeer asks the manage goroutine to unregister the given peer.
// It blocks until the manage loop receives the request.
func (pm *peerManager) RemovePeer(peer p2pcommon.RemotePeer) {
	pm.removePeerChannel <- peer
}
   375  
// NotifyPeerAddressReceived hands newly discovered peer addresses to the
// manage goroutine, which forwards them to the waiting-peer manager.
func (pm *peerManager) NotifyPeerAddressReceived(metas []p2pcommon.PeerMeta) {
	pm.fillPoolChannel <- metas
}
   379  
// UpdatePeerRole applies role changes to connected peers. The work is queued
// as a task so it executes inside the manage goroutine, which owns the
// remotePeers map; changes for unknown peer IDs are silently skipped.
func (pm *peerManager) UpdatePeerRole(changes []p2pcommon.AttrModifier) {
	pm.taskChannel <- func() {
		pm.logger.Debug().Int("size", len(changes)).Msg("changing roles of peers")
		for _, ch := range changes {
			if peer, found := pm.remotePeers[ch.ID]; found {
				pm.logger.Debug().Str(p2putil.LogPeerName, peer.Name()).Str("from", peer.Role().String()).Str("to", ch.Role.String()).Msg("changing role of peer")
				peer.ChangeRole(ch.Role)
			}
		}
	}
}
   391  
// removePeer unregisters a managed remote peer connection.
// It returns true if the peer exists and is managed by this peerManager.
// It must be called in the peerManager goroutine, which owns remotePeers.
// A stale request — one whose manage number differs from the registered
// instance's — is ignored, so removing an old connection cannot evict a
// newer one with the same peer ID.
func (pm *peerManager) removePeer(peer p2pcommon.RemotePeer) bool {
	peerID := peer.ID()
	target, ok := pm.remotePeers[peerID]
	if !ok {
		return false
	}
	if target.ManageNumber() != peer.ManageNumber() {
		pm.logger.Debug().Uint32("remove_num", peer.ManageNumber()).Uint32("exist_num", target.ManageNumber()).Str(p2putil.LogPeerID, p2putil.ShortForm(peerID)).Msg("remove peer is requested but already removed and other instance is on")
		return false
	}
	if target.State() == types.RUNNING {
		// removal proceeds anyway; this only flags an unexpected state
		pm.logger.Warn().Str(p2putil.LogPeerID, p2putil.ShortForm(peerID)).Msg("remove peer is requested but peer is still running")
	}
	pm.deletePeer(peer)
	pm.logger.Info().Uint32("manage_num", peer.ManageNumber()).Str(p2putil.LogPeerID, p2putil.ShortForm(peerID)).Msg("removed peer in peermanager")

	// notify listeners under the mutex that guards eventListeners
	pm.mutex.Lock()
	defer pm.mutex.Unlock()
	for _, listener := range pm.eventListeners {
		listener.OnPeerDisconnect(peer)
	}

	return true
}
   419  
   420  func (pm *peerManager) GetPeer(ID types.PeerID) (p2pcommon.RemotePeer, bool) {
   421  
   422  	gc := getPeerTask{id: ID, ret: make(chan p2pcommon.RemotePeer)}
   423  	// vs code's lint does not allow direct return of map operation
   424  	pm.getPeerChannel <- gc
   425  	ptr := <-gc.ret
   426  	if ptr == nil {
   427  		return nil, false
   428  	}
   429  	return ptr, true
   430  }
   431  
// GetPeers returns the current copy-on-write snapshot of connected peers.
// The returned slice is shared and must not be modified by callers; a new
// slice is swapped in whenever the peer set changes.
func (pm *peerManager) GetPeers() []p2pcommon.RemotePeer {
	pm.mutex.Lock()
	defer pm.mutex.Unlock()
	return pm.peerCache
}
   437  
   438  func (pm *peerManager) GetPeerBlockInfos() []types.PeerBlockInfo {
   439  	pm.mutex.Lock()
   440  	defer pm.mutex.Unlock()
   441  	infos := make([]types.PeerBlockInfo, len(pm.peerCache))
   442  	for i, peer := range pm.peerCache {
   443  		infos[i] = peer
   444  	}
   445  	return infos
   446  }
   447  
// GetPeerAddresses collects PeerInfo for all connected peers from the peer
// cache. When showSelf is true the local node is listed first, with its
// current best block; when noHidden is true, peers whose meta is marked
// hidden are omitted. It returns nil if showSelf is set but the local best
// block cannot be read.
func (pm *peerManager) GetPeerAddresses(noHidden bool, showSelf bool) []*message.PeerInfo {
	peers := make([]*message.PeerInfo, 0, len(pm.peerCache))
	if showSelf {
		meta := pm.is.SelfMeta()
		addr := meta.ToPeerAddress()
		bestBlk, err := pm.actorService.GetChainAccessor().GetBestBlock()
		if err != nil {
			return nil
		}
		// the local node is always reported as RUNNING and flagged as self
		selfpi := &message.PeerInfo{
			&addr, meta.Version, meta.Hidden, time.Now(), bestBlk.BlockHash(), bestBlk.Header.BlockNo, types.RUNNING, true}
		peers = append(peers, selfpi)
	}
	for _, aPeer := range pm.peerCache {
		meta := aPeer.Meta()
		if noHidden && meta.Hidden {
			continue
		}
		addr := meta.ToPeerAddress()
		// remote peers report their last known status snapshot
		lastStatus := aPeer.LastStatus()
		pi := &message.PeerInfo{
			&addr, meta.Version, meta.Hidden, lastStatus.CheckTime, lastStatus.BlockHash, lastStatus.BlockNumber, aPeer.State(), false}
		peers = append(peers, pi)
	}
	return peers
}
   474  
// insertPeer registers the peer in remotePeers and refreshes the peer cache.
// It must be called only from the peerManager goroutine, which owns
// remotePeers. NOTE(review): the previous comment said "inside pm.mutex",
// but updatePeerCache acquires pm.mutex itself, so holding the mutex here
// would deadlock — confirm the intended locking contract.
func (pm *peerManager) insertPeer(ID types.PeerID, peer p2pcommon.RemotePeer) {
	pm.remotePeers[ID] = peer
	pm.updatePeerCache()
}
   480  
// deletePeer removes the peer from remotePeers, drops its metrics entry and
// refreshes the peer cache. It must be called only from the peerManager
// goroutine, which owns remotePeers. NOTE(review): the previous comment said
// "inside pm.mutex", but updatePeerCache acquires pm.mutex itself, so
// holding the mutex here would deadlock — confirm the intended contract.
func (pm *peerManager) deletePeer(peer p2pcommon.RemotePeer) {
	pm.mm.Remove(peer.ID(), peer.ManageNumber())
	delete(pm.remotePeers, peer.ID())
	pm.updatePeerCache()
}
   487  
   488  func (pm *peerManager) updatePeerCache() {
   489  	newSlice := make([]p2pcommon.RemotePeer, 0, len(pm.remotePeers))
   490  	for _, rPeer := range pm.remotePeers {
   491  		newSlice = append(newSlice, rPeer)
   492  	}
   493  	pm.mutex.Lock()
   494  	defer pm.mutex.Unlock()
   495  	pm.peerCache = newSlice
   496  }
   497  
   498  func (pm *peerManager) checkSync(peer p2pcommon.RemotePeer) {
   499  	if pm.skipHandshakeSync {
   500  		return
   501  	}
   502  
   503  	pm.logger.Debug().Uint64("target", peer.LastStatus().BlockNumber).Msg("request new syncer")
   504  	pm.actorService.SendRequest(message.SyncerSvc, &message.SyncStart{PeerID: peer.ID(), TargetNo: peer.LastStatus().BlockNumber})
   505  }
   506  
   507  func (pm *peerManager) AddDesignatedPeer(meta p2pcommon.PeerMeta) {
   508  	finished := make(chan interface{})
   509  	pm.taskChannel <- func() {
   510  		pm.designatedPeers[meta.ID] = meta
   511  		finished <- struct{}{}
   512  	}
   513  	<-finished
   514  }
   515  
   516  func (pm *peerManager) RemoveDesignatedPeer(peerID types.PeerID) {
   517  	finished := make(chan interface{})
   518  	pm.taskChannel <- func() {
   519  		delete(pm.designatedPeers, peerID)
   520  		finished <- struct{}{}
   521  	}
   522  	<-finished
   523  }
   524  
   525  func (pm *peerManager) ListDesignatedPeers() []p2pcommon.PeerMeta {
   526  	retChan := make(chan []p2pcommon.PeerMeta)
   527  	pm.taskChannel <- func() {
   528  		arr := make([]p2pcommon.PeerMeta, 0, len(pm.designatedPeers))
   529  		for _, m := range pm.designatedPeers {
   530  			arr = append(arr, m)
   531  		}
   532  		retChan <- arr
   533  	}
   534  	return <-retChan
   535  }
   536  
// pmTask is a function executed inside the peerManager goroutine via
// taskChannel, serializing access to state the manage loop owns.
// pmTask should not consume lots of time to process.
type pmTask func()