github.com/amazechain/amc@v0.1.3/internal/p2p/peers/status.go

     1  // Package peers provides information about peers at the Ethereum consensus protocol level.
     2  //
     3  // "Protocol level" is the level above the network level, so this layer never sees or interacts with
     4  // (for example) hosts that are uncontactable due to being down, firewalled, etc. Instead, this works
     5  // with peers that are contactable but may or may not be of the correct fork version, not currently
     6  // required due to the number of current connections, etc.
     7  //
     8  // A peer can have one of a number of states:
     9  //
    10  // - connected if we are able to talk to the remote peer
    11  // - connecting if we are attempting to be able to talk to the remote peer
    12  // - disconnecting if we are attempting to stop being able to talk to the remote peer
    13  // - disconnected if we are not able to talk to the remote peer
    14  //
    15  // For convenience, there are two aggregate states expressed in functions:
    16  //
    17  // - active if we are connecting or connected
    18  // - inactive if we are disconnecting or disconnected
    19  //
    20  // Peer information is persistent for the run of the service. This allows for collection of useful
    21  // long-term statistics such as number of bad responses obtained from the peer, giving the basis for
    22  // decisions to not talk to known-bad peers (by de-scoring them).
    23  package peers
    24  
    25  import (
    26  	"context"
    27  	"fmt"
    28  	"github.com/amazechain/amc/api/protocol/sync_pb"
    29  	"github.com/amazechain/amc/common/crypto/rand"
    30  	"github.com/amazechain/amc/internal/p2p/enr"
    31  	"github.com/amazechain/amc/internal/p2p/peers/peerdata"
    32  	"github.com/amazechain/amc/internal/p2p/peers/scorers"
    33  	"github.com/amazechain/amc/log"
    34  	"github.com/holiman/uint256"
    35  	"google.golang.org/protobuf/proto"
    36  	"math"
    37  	"net"
    38  	"sort"
    39  	"time"
    40  
    41  	"github.com/libp2p/go-libp2p/core/network"
    42  	"github.com/libp2p/go-libp2p/core/peer"
    43  	ma "github.com/multiformats/go-multiaddr"
    44  	manet "github.com/multiformats/go-multiaddr/net"
    45  )
    46  
    47  const (
    48  	// PeerDisconnected means there is no connection to the peer.
    49  	PeerDisconnected peerdata.PeerConnectionState = iota
    50  	// PeerDisconnecting means there is an on-going attempt to disconnect from the peer.
    51  	PeerDisconnecting
    52  	// PeerConnected means the peer has an active connection.
    53  	PeerConnected
    54  	// PeerConnecting means there is an on-going attempt to connect to the peer.
    55  	PeerConnecting
    56  )
    57  
    58  const (
    59  	// ColocationLimit restricts how many peer identities we can see from a single IP or IPv6 subnet.
    60  	ColocationLimit = 5
    61  
    62  	// maxLimitBuffer is an additional buffer beyond the current peer limit, in which we can still store relevant peer statuses.
    63  	maxLimitBuffer = 0
    64  
    65  	// InboundRatio is the proportion of our connected peer limit at which we will allow inbound peers.
    66  	InboundRatio = float64(0.8)
    67  
    68  	// MinBackOffDuration is the minimum amount (in milliseconds) to wait before a peer is re-dialed.
    69  	// When the node and a peer dial each other simultaneously, the connection may fail. In order to break
    70  	// out of constant re-dialing, the peer is assigned a backoff period and is only dialed again once that backoff is up.
    71  	MinBackOffDuration = 100
    72  	// MaxBackOffDuration is the maximum amount (in milliseconds) to wait before a peer is re-dialed.
    73  	MaxBackOffDuration = 5000
    74  )
    75  
    76  // Status is the structure holding the peer status information.
    77  type Status struct {
    78  	ctx       context.Context
    79  	scorers   *scorers.Service
    80  	store     *peerdata.Store
    81  	ipTracker map[string]uint64
    82  	rand      *rand.Rand
    83  }
    84  
    85  // StatusConfig represents peer status service params.
    86  type StatusConfig struct {
    87  	// PeerLimit specifies the maximum number of concurrent peers that are expected to be connected to the node.
    88  	PeerLimit int
    89  	// ScorerParams holds peer scorer configuration params.
    90  	ScorerParams *scorers.Config
    91  }
    92  
    93  // NewStatus creates a new status entity.
    94  func NewStatus(ctx context.Context, config *StatusConfig) *Status {
    95  	store := peerdata.NewStore(ctx, &peerdata.StoreConfig{
    96  		MaxPeers: maxLimitBuffer + config.PeerLimit,
    97  	})
    98  	return &Status{
    99  		ctx:       ctx,
   100  		store:     store,
   101  		scorers:   scorers.NewService(ctx, store, config.ScorerParams),
   102  		ipTracker: map[string]uint64{},
   103  		// Random generator used to calculate the dial backoff period.
   104  		// It is ok to use a deterministic generator; there is no need for true entropy.
   105  		rand: rand.NewDeterministicGenerator(),
   106  	}
   107  }
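// Illustrative usage (not part of the original file): constructing the peer
// status service. This is a minimal sketch; the PeerLimit value is arbitrary
// and it assumes a zero-value scorers.Config is accepted and falls back to
// the scorer defaults, which may not hold for every version of this package.
func exampleNewStatus(ctx context.Context) *Status {
	return NewStatus(ctx, &StatusConfig{
		PeerLimit:    30,                // arbitrary example limit
		ScorerParams: &scorers.Config{}, // assumed to default sensibly
	})
}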
   108  
   109  // Scorers exposes peer scoring management service.
   110  func (p *Status) Scorers() *scorers.Service {
   111  	return p.scorers
   112  }
   113  
   114  // MaxPeerLimit returns the max peer limit stored in the current peer store.
   115  func (p *Status) MaxPeerLimit() int {
   116  	return p.store.Config().MaxPeers
   117  }
   118  
   119  // Add adds a peer.
   120  // If a peer already exists with this ID, its address and direction are updated with the supplied data.
   121  func (p *Status) Add(record *enr.Record, pid peer.ID, address ma.Multiaddr, direction network.Direction) {
   122  	p.store.Lock()
   123  	defer p.store.Unlock()
   124  
   125  	if peerData, ok := p.store.PeerData(pid); ok {
   126  		// Peer already exists, just update its address info.
   127  		prevAddress := peerData.Address
   128  		peerData.Address = address
   129  		peerData.Direction = direction
   130  		if record != nil {
   131  			peerData.Enr = record
   132  		}
   133  		if !sameIP(prevAddress, address) {
   134  			p.addIpToTracker(pid)
   135  		}
   136  		return
   137  	}
   138  	peerData := &peerdata.PeerData{
   139  		Address:   address,
   140  		Direction: direction,
   141  		// Peers start disconnected; state will be updated when the handshake process begins.
   142  		ConnState: PeerDisconnected,
   143  	}
   144  	if record != nil {
   145  		peerData.Enr = record
   146  	}
   147  	p.store.SetPeerData(pid, peerData)
   148  	p.addIpToTracker(pid)
   149  }
   150  
   151  // Address returns the multiaddress of the given remote peer.
   152  // This will error if the peer does not exist.
   153  func (p *Status) Address(pid peer.ID) (ma.Multiaddr, error) {
   154  	p.store.RLock()
   155  	defer p.store.RUnlock()
   156  
   157  	if peerData, ok := p.store.PeerData(pid); ok {
   158  		return peerData.Address, nil
   159  	}
   160  	return nil, peerdata.ErrPeerUnknown
   161  }
   162  
   163  // Direction returns the direction of the given remote peer.
   164  // This will error if the peer does not exist.
   165  func (p *Status) Direction(pid peer.ID) (network.Direction, error) {
   166  	p.store.RLock()
   167  	defer p.store.RUnlock()
   168  
   169  	if peerData, ok := p.store.PeerData(pid); ok {
   170  		return peerData.Direction, nil
   171  	}
   172  	return network.DirUnknown, peerdata.ErrPeerUnknown
   173  }
   174  
   175  // ENR returns the enr for the corresponding peer id.
   176  func (p *Status) ENR(pid peer.ID) (*enr.Record, error) {
   177  	p.store.RLock()
   178  	defer p.store.RUnlock()
   179  
   180  	if peerData, ok := p.store.PeerData(pid); ok {
   181  		return peerData.Enr, nil
   182  	}
   183  	return nil, peerdata.ErrPeerUnknown
   184  }
   185  
   186  // IP returns the ip address for the corresponding peer id.
   187  func (p *Status) IP(pid peer.ID) (net.IP, error) {
   188  	p.store.RLock()
   189  	defer p.store.RUnlock()
   190  
   191  	if peerData, ok := p.store.PeerData(pid); ok {
   192  		IP, err := manet.ToIP(peerData.Address)
   193  		if err != nil {
   194  			return nil, err
   195  		}
   196  		return IP, nil
   197  	}
   198  	return nil, peerdata.ErrPeerUnknown
   199  }
   200  
   201  // DialArgs returns the dial arguments (protocol and address, formatted as protocol://address) for the corresponding peer id.
   202  func (p *Status) DialArgs(pid peer.ID) (string, error) {
   203  	p.store.RLock()
   204  	defer p.store.RUnlock()
   205  
   206  	if peerData, ok := p.store.PeerData(pid); ok && peerData.Address != nil {
   207  		protocol, details, err := manet.DialArgs(peerData.Address)
   208  		if err != nil {
   209  			return "", err
   210  		}
   211  		return fmt.Sprintf("%s://%s", protocol, details), nil
   212  	}
   213  	return "", peerdata.ErrPeerUnknown
   214  }
   215  
   216  // ConnState returns the connection state for the corresponding peer id. This will error if the peer does not exist.
   217  func (p *Status) ConnState(pid peer.ID) (peerdata.PeerConnectionState, error) {
   218  	p.store.RLock()
   219  	defer p.store.RUnlock()
   220  
   221  	if peerData, ok := p.store.PeerData(pid); ok {
   222  		return peerData.ConnState, nil
   223  	}
   224  	return 0, peerdata.ErrPeerUnknown
   225  }
   226  
   227  // SetChainState sets the chain state of the given remote peer.
   228  func (p *Status) SetChainState(pid peer.ID, chainState *sync_pb.Status) {
   229  	p.scorers.PeerStatusScorer().SetPeerStatus(pid, chainState, nil)
   230  }
   231  
   232  // ChainState gets the chain state of the given remote peer.
   233  // This will error if the peer does not exist.
   234  // This will error if there is no known chain state for the peer.
   235  func (p *Status) ChainState(pid peer.ID) (*sync_pb.Status, error) {
   236  	return p.scorers.PeerStatusScorer().PeerStatus(pid)
   237  }
   238  
   239  // IsActive checks if a peer is active (connected or connecting) and returns the result appropriately.
   240  func (p *Status) IsActive(pid peer.ID) bool {
   241  	p.store.RLock()
   242  	defer p.store.RUnlock()
   243  
   244  	peerData, ok := p.store.PeerData(pid)
   245  	return ok && (peerData.ConnState == PeerConnected || peerData.ConnState == PeerConnecting)
   246  }
   247  
   248  // IsAboveInboundLimit checks if we are above our current inbound
   249  // peer limit.
   250  func (p *Status) IsAboveInboundLimit() bool {
   251  	p.store.RLock()
   252  	defer p.store.RUnlock()
   253  	totalInbound := 0
   254  	for _, peerData := range p.store.Peers() {
   255  		if peerData.ConnState == PeerConnected &&
   256  			peerData.Direction == network.DirInbound {
   257  			totalInbound += 1
   258  		}
   259  	}
   260  	inboundLimit := int(float64(p.ConnectedPeerLimit()) * InboundRatio)
   261  	return totalInbound > inboundLimit
   262  }
   263  
   264  // InboundLimit returns the current inbound
   265  // peer limit.
   266  func (p *Status) InboundLimit() int {
   267  	p.store.RLock()
   268  	defer p.store.RUnlock()
   269  	return int(float64(p.ConnectedPeerLimit()) * InboundRatio)
   270  }
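// Sketch (not in the original file): gating a new inbound connection on the
// inbound limit. The disconnect callback is a hypothetical stand-in for the
// host's disconnect routine. With maxLimitBuffer = 0 and InboundRatio = 0.8,
// a PeerLimit of 50 yields an inbound limit of int(50 * 0.8) = 40.
func exampleGateInbound(p *Status, pid peer.ID, disconnect func(peer.ID)) {
	if p.IsAboveInboundLimit() {
		disconnect(pid)
	}
}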
   271  
   272  // SetPing stores the most recent ping message received from the given remote peer.
   273  func (p *Status) SetPing(pid peer.ID, ping *sync_pb.Ping) {
   274  	p.store.Lock()
   275  	defer p.store.Unlock()
   276  
   277  	peerData := p.store.PeerDataGetOrCreate(pid)
   278  	peerData.Ping = ping
   279  }
   280  
   281  // GetPing returns a copy of the most recent ping message received from the given remote peer. This will error if the peer does not exist.
   282  func (p *Status) GetPing(pid peer.ID) (*sync_pb.Ping, error) {
   283  	p.store.RLock()
   284  	defer p.store.RUnlock()
   285  
   286  	if peerData, ok := p.store.PeerData(pid); ok {
   287  		if peerData.Ping == nil {
   288  			return nil, nil
   289  		}
   290  		// Return a clone so that callers cannot mutate the stored ping.
   291  		return proto.Clone(peerData.Ping).(*sync_pb.Ping), nil
   292  	}
   293  	return nil, peerdata.ErrPeerUnknown
   294  }
   295  
   296  // SetConnectionState sets the connection state of the given remote peer.
   297  func (p *Status) SetConnectionState(pid peer.ID, state peerdata.PeerConnectionState) {
   298  	p.store.Lock()
   299  	defer p.store.Unlock()
   300  
   301  	peerData := p.store.PeerDataGetOrCreate(pid)
   302  	peerData.ConnState = state
   303  }
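// Sketch (not in the original file): how a connection handler might walk a
// peer through the state machine described at the top of this package. The
// nil ENR and the outbound direction are assumptions for the example.
func exampleTrackConnection(p *Status, pid peer.ID, addr ma.Multiaddr) {
	// Register (or refresh) the peer record; new peers start as disconnected.
	p.Add(nil, pid, addr, network.DirOutbound)
	// Record the dial attempt, then the established connection.
	p.SetConnectionState(pid, PeerConnecting)
	p.SetConnectionState(pid, PeerConnected)
}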
   304  
   305  // ConnectionState gets the connection state of the given remote peer.
   306  // This will error if the peer does not exist.
   307  func (p *Status) ConnectionState(pid peer.ID) (peerdata.PeerConnectionState, error) {
   308  	p.store.RLock()
   309  	defer p.store.RUnlock()
   310  
   311  	if peerData, ok := p.store.PeerData(pid); ok {
   312  		return peerData.ConnState, nil
   313  	}
   314  	return PeerDisconnected, peerdata.ErrPeerUnknown
   315  }
   316  
   317  // ChainStateLastUpdated gets the last time the chain state of the given remote peer was updated.
   318  // This will error if the peer does not exist.
   319  func (p *Status) ChainStateLastUpdated(pid peer.ID) (time.Time, error) {
   320  	p.store.RLock()
   321  	defer p.store.RUnlock()
   322  
   323  	if peerData, ok := p.store.PeerData(pid); ok {
   324  		return peerData.ChainStateLastUpdated, nil
   325  	}
   326  	return time.Now(), peerdata.ErrPeerUnknown
   327  }
   328  
   329  // IsBad states if the peer is to be considered bad (by *any* of the registered scorers).
   330  // If the peer is unknown this will return `false`, which makes using this function easier than returning an error.
   331  func (p *Status) IsBad(pid peer.ID) bool {
   332  	p.store.RLock()
   333  	defer p.store.RUnlock()
   334  	return p.isBad(pid)
   335  }
   336  
   337  // isBad is the lock-free version of IsBad.
   338  func (p *Status) isBad(pid peer.ID) bool {
   339  	return p.isfromBadIP(pid) || p.scorers.IsBadPeerNoLock(pid)
   340  }
   341  
   342  // NextValidTime gets the earliest time at which it is acceptable to contact/dial
   343  // a peer again. This is used to back off from peers in the event
   344  // they are 'full' or have banned us.
   345  func (p *Status) NextValidTime(pid peer.ID) (time.Time, error) {
   346  	p.store.RLock()
   347  	defer p.store.RUnlock()
   348  
   349  	if peerData, ok := p.store.PeerData(pid); ok {
   350  		return peerData.NextValidTime, nil
   351  	}
   352  	return time.Now(), peerdata.ErrPeerUnknown
   353  }
   354  
   355  // SetNextValidTime sets the earliest possible time we are
   356  // able to contact this peer again.
   357  func (p *Status) SetNextValidTime(pid peer.ID, nextTime time.Time) {
   358  	p.store.Lock()
   359  	defer p.store.Unlock()
   360  
   361  	peerData := p.store.PeerDataGetOrCreate(pid)
   362  	peerData.NextValidTime = nextTime
   363  }
   364  
   365  // RandomizeBackOff adds an extra backoff period during which the peer will not be dialed.
   366  func (p *Status) RandomizeBackOff(pid peer.ID) {
   367  	p.store.Lock()
   368  	defer p.store.Unlock()
   369  
   370  	peerData := p.store.PeerDataGetOrCreate(pid)
   371  
   372  	// No need to add backoff period, if the previous one hasn't expired yet.
   373  	if !time.Now().After(peerData.NextValidTime) {
   374  		return
   375  	}
   376  
   377  	duration := time.Duration(math.Max(MinBackOffDuration, float64(p.rand.Intn(MaxBackOffDuration)))) * time.Millisecond
   378  	peerData.NextValidTime = time.Now().Add(duration)
   379  }
   380  
   381  // IsReadyToDial checks whether the given peer is ready to be
   382  // dialed again.
   383  func (p *Status) IsReadyToDial(pid peer.ID) bool {
   384  	p.store.RLock()
   385  	defer p.store.RUnlock()
   386  
   387  	if peerData, ok := p.store.PeerData(pid); ok {
   388  		timeIsZero := peerData.NextValidTime.IsZero()
   389  		isInvalidTime := peerData.NextValidTime.After(time.Now())
   390  		return timeIsZero || !isInvalidTime
   391  	}
   392  	// If no record exists, we don't restrict dials to the
   393  	// peer.
   394  	return true
   395  }
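// Sketch (not in the original file): a dial loop combining IsReadyToDial with
// RandomizeBackOff. dialPeer is a hypothetical stand-in for the host's dial
// function.
func exampleDialWithBackoff(p *Status, pid peer.ID, dialPeer func(peer.ID) error) {
	if !p.IsReadyToDial(pid) {
		return // still inside this peer's backoff window
	}
	if err := dialPeer(pid); err != nil {
		// Push the next attempt out by a random 100ms-5s window.
		p.RandomizeBackOff(pid)
	}
}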
   396  
   397  // Connecting returns the peers that are connecting.
   398  func (p *Status) Connecting() []peer.ID {
   399  	p.store.RLock()
   400  	defer p.store.RUnlock()
   401  	peers := make([]peer.ID, 0)
   402  	for pid, peerData := range p.store.Peers() {
   403  		if peerData.ConnState == PeerConnecting {
   404  			peers = append(peers, pid)
   405  		}
   406  	}
   407  	return peers
   408  }
   409  
   410  // Connected returns the peers that are connected.
   411  func (p *Status) Connected() []peer.ID {
   412  	p.store.RLock()
   413  	defer p.store.RUnlock()
   414  	peers := make([]peer.ID, 0)
   415  	for pid, peerData := range p.store.Peers() {
   416  		if peerData.ConnState == PeerConnected {
   417  			peers = append(peers, pid)
   418  		}
   419  	}
   420  	return peers
   421  }
   422  
   423  // Inbound returns the current batch of inbound peers.
   424  func (p *Status) Inbound() []peer.ID {
   425  	p.store.RLock()
   426  	defer p.store.RUnlock()
   427  	peers := make([]peer.ID, 0)
   428  	for pid, peerData := range p.store.Peers() {
   429  		if peerData.Direction == network.DirInbound {
   430  			peers = append(peers, pid)
   431  		}
   432  	}
   433  	return peers
   434  }
   435  
   436  // InboundConnected returns the current batch of inbound peers that are connected.
   437  func (p *Status) InboundConnected() []peer.ID {
   438  	p.store.RLock()
   439  	defer p.store.RUnlock()
   440  	peers := make([]peer.ID, 0)
   441  	for pid, peerData := range p.store.Peers() {
   442  		if peerData.ConnState == PeerConnected && peerData.Direction == network.DirInbound {
   443  			peers = append(peers, pid)
   444  		}
   445  	}
   446  	return peers
   447  }
   448  
   449  // Outbound returns the current batch of outbound peers.
   450  func (p *Status) Outbound() []peer.ID {
   451  	p.store.RLock()
   452  	defer p.store.RUnlock()
   453  	peers := make([]peer.ID, 0)
   454  	for pid, peerData := range p.store.Peers() {
   455  		if peerData.Direction == network.DirOutbound {
   456  			peers = append(peers, pid)
   457  		}
   458  	}
   459  	return peers
   460  }
   461  
   462  // OutboundConnected returns the current batch of outbound peers that are connected.
   463  func (p *Status) OutboundConnected() []peer.ID {
   464  	p.store.RLock()
   465  	defer p.store.RUnlock()
   466  	peers := make([]peer.ID, 0)
   467  	for pid, peerData := range p.store.Peers() {
   468  		if peerData.ConnState == PeerConnected && peerData.Direction == network.DirOutbound {
   469  			peers = append(peers, pid)
   470  		}
   471  	}
   472  	return peers
   473  }
   474  
   475  // Active returns the peers that are connecting or connected.
   476  func (p *Status) Active() []peer.ID {
   477  	p.store.RLock()
   478  	defer p.store.RUnlock()
   479  	peers := make([]peer.ID, 0)
   480  	for pid, peerData := range p.store.Peers() {
   481  		if peerData.ConnState == PeerConnecting || peerData.ConnState == PeerConnected {
   482  			peers = append(peers, pid)
   483  		}
   484  	}
   485  	return peers
   486  }
   487  
   488  // Disconnecting returns the peers that are disconnecting.
   489  func (p *Status) Disconnecting() []peer.ID {
   490  	p.store.RLock()
   491  	defer p.store.RUnlock()
   492  	peers := make([]peer.ID, 0)
   493  	for pid, peerData := range p.store.Peers() {
   494  		if peerData.ConnState == PeerDisconnecting {
   495  			peers = append(peers, pid)
   496  		}
   497  	}
   498  	return peers
   499  }
   500  
   501  // Disconnected returns the peers that are disconnected.
   502  func (p *Status) Disconnected() []peer.ID {
   503  	p.store.RLock()
   504  	defer p.store.RUnlock()
   505  	peers := make([]peer.ID, 0)
   506  	for pid, peerData := range p.store.Peers() {
   507  		if peerData.ConnState == PeerDisconnected {
   508  			peers = append(peers, pid)
   509  		}
   510  	}
   511  	return peers
   512  }
   513  
   514  // Inactive returns the peers that are disconnecting or disconnected.
   515  func (p *Status) Inactive() []peer.ID {
   516  	p.store.RLock()
   517  	defer p.store.RUnlock()
   518  	peers := make([]peer.ID, 0)
   519  	for pid, peerData := range p.store.Peers() {
   520  		if peerData.ConnState == PeerDisconnecting || peerData.ConnState == PeerDisconnected {
   521  			peers = append(peers, pid)
   522  		}
   523  	}
   524  	return peers
   525  }
   526  
   527  // Bad returns the peers that are bad.
   528  func (p *Status) Bad() []peer.ID {
   529  	return p.scorers.BadResponsesScorer().BadPeers()
   530  }
   531  
   532  // All returns all the peers regardless of state.
   533  func (p *Status) All() []peer.ID {
   534  	p.store.RLock()
   535  	defer p.store.RUnlock()
   536  	pids := make([]peer.ID, 0, len(p.store.Peers()))
   537  	for pid := range p.store.Peers() {
   538  		pids = append(pids, pid)
   539  	}
   540  	return pids
   541  }
   542  
   543  // Prune clears out and removes outdated and disconnected peers.
   544  func (p *Status) Prune() {
   545  	p.store.Lock()
   546  	defer p.store.Unlock()
   547  
   548  	// Exit early if there is nothing to prune.
   549  	if len(p.store.Peers()) <= p.store.Config().MaxPeers {
   550  		return
   551  	}
   552  	notBadPeer := func(pid peer.ID) bool {
   553  		return !p.isBad(pid)
   554  	}
   555  	type peerResp struct {
   556  		pid   peer.ID
   557  		score float64
   558  	}
   559  	peersToPrune := make([]*peerResp, 0)
   560  	// Select disconnected peers that are not considered bad, along with their scores.
   561  	for pid, peerData := range p.store.Peers() {
   562  		if peerData.ConnState == PeerDisconnected && notBadPeer(pid) {
   563  			peersToPrune = append(peersToPrune, &peerResp{
   564  				pid:   pid,
   565  				score: p.Scorers().ScoreNoLock(pid),
   566  			})
   567  		}
   568  	}
   569  
   570  	// Sort peers in descending order, so the peers with the
   571  	// highest score are pruned first. This keeps the records of
   572  	// malicious/lousy peers in memory for longer, so the node
   573  	// does not forget about them.
   574  	sort.Slice(peersToPrune, func(i, j int) bool {
   575  		return peersToPrune[i].score > peersToPrune[j].score
   576  	})
   577  
   578  	limitDiff := len(p.store.Peers()) - p.store.Config().MaxPeers
   579  	if limitDiff > len(peersToPrune) {
   580  		limitDiff = len(peersToPrune)
   581  	}
   582  
   583  	peersToPrune = peersToPrune[:limitDiff]
   584  
   585  	// Delete peers from map.
   586  	for _, peerData := range peersToPrune {
   587  		p.store.DeletePeerData(peerData.pid)
   588  	}
   589  	p.tallyIPTracker()
   590  }
   591  
   592  // BestPeers returns up to wantPeers connected peers whose current height is above ours,
   593  // together with the lowest height amongst them, i.e. a height every returned peer has reached.
   594  func (p *Status) BestPeers(wantPeers int, ourCurrentHeight *uint256.Int) (*uint256.Int, []peer.ID) {
   595  	connected := p.Connected()
   596  
   597  	pidHead := make(map[peer.ID]*uint256.Int, len(connected))
   598  	potentialPIDs := make([]peer.ID, 0, len(connected))
   599  	for _, pid := range connected {
   600  		peerData, ok := p.store.PeerData(pid)
   601  		if ok && peerData.CurrentHeight().Cmp(ourCurrentHeight) == 1 {
   602  			pidHead[pid] = peerData.CurrentHeight()
   603  			potentialPIDs = append(potentialPIDs, pid)
   604  		}
   605  	}
   606  
   607  	// Sort PIDs by CurrentHeight, in decreasing order.
   608  	sort.Slice(potentialPIDs, func(i, j int) bool {
   609  		return pidHead[potentialPIDs[i]].Cmp(pidHead[potentialPIDs[j]]) == 1
   610  	})
   611  
   612  	if len(potentialPIDs) > wantPeers {
   613  		potentialPIDs = potentialPIDs[:wantPeers]
   614  	}
   615  
   616  	if len(potentialPIDs) == 0 {
   617  		return uint256.NewInt(0), []peer.ID{}
   618  	}
   619  
   620  	// Use the lowest height amongst the selected peers as the target
   621  	// block number: every returned peer has reached at least this height.
   622  	targetBlockNumber := pidHead[potentialPIDs[len(potentialPIDs)-1]]
   623  
   624  	return targetBlockNumber, potentialPIDs
   625  
   626  }
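// Sketch (not in the original file): a downloader asking for the best peers
// above its own head and syncing toward the returned target height.
// requestBlocks is a hypothetical helper, not part of this package.
func exampleSyncFromBestPeers(p *Status, ourHeight *uint256.Int, requestBlocks func(peer.ID, *uint256.Int)) {
	target, pids := p.BestPeers(5, ourHeight)
	if len(pids) == 0 {
		return // nobody is ahead of us
	}
	for _, pid := range pids {
		requestBlocks(pid, target)
	}
}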
   627  
   628  // PeersToPrune selects the most suitable inbound peers
   629  // to disconnect the host peer from. Connected inbound
   630  // peers are ranked by their overall score, and the
   631  // lowest-scoring peers are selected for removal first,
   632  // until we are back under the connection and inbound limits.
   633  func (p *Status) PeersToPrune() []peer.ID {
   634  	connLimit := p.ConnectedPeerLimit()
   635  	inBoundLimit := uint64(p.InboundLimit())
   636  	activePeers := p.Active()
   637  	numInboundPeers := uint64(len(p.InboundConnected()))
   638  	// Exit early if we are still below our max
   639  	// limit.
   640  	if uint64(len(activePeers)) <= connLimit {
   641  		return []peer.ID{}
   642  	}
   643  	p.store.Lock()
   644  	defer p.store.Unlock()
   645  
   646  	type peerResp struct {
   647  		pid   peer.ID
   648  		score float64
   649  	}
   650  	peersToPrune := make([]*peerResp, 0)
   651  	// Select connected and inbound peers to prune.
   652  	for pid, peerData := range p.store.Peers() {
   653  		if peerData.ConnState == PeerConnected &&
   654  			peerData.Direction == network.DirInbound {
   655  			peersToPrune = append(peersToPrune, &peerResp{
   656  				pid:   pid,
   657  				score: p.scorers.ScoreNoLock(pid),
   658  			})
   659  		}
   660  	}
   661  
   662  	// Sort in ascending order to favour pruning peers with a
   663  	// lower score.
   664  	sort.Slice(peersToPrune, func(i, j int) bool {
   665  		return peersToPrune[i].score < peersToPrune[j].score
   666  	})
   667  
   668  	// Determine amount of peers to prune using our
   669  	// max connection limit.
   670  	if connLimit > uint64(len(activePeers)) {
   671  		// This should never happen.
   672  		log.Error("Failed to determine amount of peers to prune")
   673  		return []peer.ID{}
   674  	}
   675  
   676  	amountToPrune := uint64(len(activePeers)) - connLimit
   677  
   678  	// Also check for inbound peers above our limit.
   679  	excessInbound := uint64(0)
   680  	if numInboundPeers > inBoundLimit {
   681  		excessInbound = numInboundPeers - inBoundLimit
   682  	}
   683  	// Prune whichever excess is larger: total excess peers
   684  	// or excess inbound peers.
   685  	if excessInbound > amountToPrune {
   686  		amountToPrune = excessInbound
   687  	}
   688  	if amountToPrune < uint64(len(peersToPrune)) {
   689  		peersToPrune = peersToPrune[:amountToPrune]
   690  	}
   691  	ids := make([]peer.ID, 0, len(peersToPrune))
   692  	for _, pr := range peersToPrune {
   693  		ids = append(ids, pr.pid)
   694  	}
   695  	return ids
   696  }
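// Sketch (not in the original file): a connection manager periodically trimming
// excess inbound peers. disconnect is a placeholder for the host's disconnect
// routine.
func exampleTrimInbound(p *Status, disconnect func(peer.ID)) {
	for _, pid := range p.PeersToPrune() {
		disconnect(pid)
	}
}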
   697  
   698  // HighestBlockNumber returns the highest block number reported amongst connected peers.
   699  func (p *Status) HighestBlockNumber() *uint256.Int {
   700  	p.store.RLock()
   701  	defer p.store.RUnlock()
   702  	highestBlockNr := uint256.NewInt(0) // start at zero so Cmp never dereferences a nil value
   703  	for _, peerData := range p.store.Peers() {
   704  		if peerData != nil && peerData.ChainState != nil && peerData.CurrentHeight().Cmp(highestBlockNr) == 1 {
   705  			highestBlockNr = peerData.CurrentHeight()
   706  		}
   707  	}
   708  	return highestBlockNr.Clone()
   709  }
   710  
   711  // ConnectedPeerLimit returns the maximum number of
   712  // concurrent peers that may be connected to the node.
   713  func (p *Status) ConnectedPeerLimit() uint64 {
   714  	maxLim := p.MaxPeerLimit()
   715  	if maxLim <= maxLimitBuffer {
   716  		return 0
   717  	}
   718  	return uint64(maxLim) - maxLimitBuffer
   719  }
   720  
   721  // isfromBadIP checks whether the peer's IP exceeds the colocation limit (or cannot
   722  // be parsed). This method assumes the store lock is acquired before executing.
   723  func (p *Status) isfromBadIP(pid peer.ID) bool {
   724  	peerData, ok := p.store.PeerData(pid)
   725  	if !ok {
   726  		return false
   727  	}
   728  	if peerData.Address == nil {
   729  		return false
   730  	}
   731  	ip, err := manet.ToIP(peerData.Address)
   732  	if err != nil {
   733  		return true
   734  	}
   735  	if val, ok := p.ipTracker[ip.String()]; ok {
   736  		if val > ColocationLimit {
   737  			return true
   738  		}
   739  	}
   740  	return false
   741  }
   742  
   743  func (p *Status) addIpToTracker(pid peer.ID) {
   744  	data, ok := p.store.PeerData(pid)
   745  	if !ok {
   746  		return
   747  	}
   748  	if data.Address == nil {
   749  		return
   750  	}
   751  	ip, err := manet.ToIP(data.Address)
   752  	if err != nil {
   753  		// Should never happen; it is
   754  		// assumed every IP coming in
   755  		// is a valid IP.
   756  		return
   757  	}
   758  	// Ignore loopback addresses.
   759  	if ip.IsLoopback() {
   760  		return
   761  	}
   762  	stringIP := ip.String()
   763  	p.ipTracker[stringIP] += 1
   764  }
   765  
   766  func (p *Status) tallyIPTracker() {
   767  	tracker := map[string]uint64{}
   768  	// Iterate through all peers.
   769  	for _, peerData := range p.store.Peers() {
   770  		if peerData.Address == nil {
   771  			continue
   772  		}
   773  		ip, err := manet.ToIP(peerData.Address)
   774  		if err != nil {
   775  			// Should never happen; it is
   776  			// assumed every IP coming in
   777  			// is a valid IP.
   778  			continue
   779  		}
   780  		stringIP := ip.String()
   781  		tracker[stringIP] += 1
   782  	}
   783  	p.ipTracker = tracker
   784  }
   785  
   786  func sameIP(firstAddr, secondAddr ma.Multiaddr) bool {
   787  	// Exit early if we get nil multiaddresses.
   788  	if firstAddr == nil || secondAddr == nil {
   789  		return false
   790  	}
   791  	firstIP, err := manet.ToIP(firstAddr)
   792  	if err != nil {
   793  		return false
   794  	}
   795  	secondIP, err := manet.ToIP(secondAddr)
   796  	if err != nil {
   797  		return false
   798  	}
   799  	return firstIP.Equal(secondIP)
   800  }