github.com/prysmaticlabs/prysm@v1.4.4/beacon-chain/p2p/peers/status.go (about)

     1  // Package peers provides information about peers at the Ethereum consensus protocol level.
     2  //
     3  // "Protocol level" is the level above the network level, so this layer never sees or interacts with
     4  // (for example) hosts that are uncontactable due to being down, firewalled, etc. Instead, this works
     5  // with peers that are contactable but may or may not be of the correct fork version, not currently
     6  // required due to the number of current connections, etc.
     7  //
     8  // A peer can have one of a number of states:
     9  //
    10  // - connected if we are able to talk to the remote peer
    11  // - connecting if we are attempting to be able to talk to the remote peer
    12  // - disconnecting if we are attempting to stop being able to talk to the remote peer
    13  // - disconnected if we are not able to talk to the remote peer
    14  //
    15  // For convenience, there are two aggregate states expressed in functions:
    16  //
    17  // - active if we are connecting or connected
    18  // - inactive if we are disconnecting or disconnected
    19  //
    20  // Peer information is persistent for the run of the service. This allows for collection of useful
    21  // long-term statistics such as number of bad responses obtained from the peer, giving the basis for
    22  // decisions to not talk to known-bad peers (by de-scoring them).
    23  package peers
    24  
    25  import (
    26  	"context"
    27  	"math"
    28  	"sort"
    29  	"time"
    30  
    31  	"github.com/ethereum/go-ethereum/p2p/enr"
    32  	"github.com/libp2p/go-libp2p-core/network"
    33  	"github.com/libp2p/go-libp2p-core/peer"
    34  	ma "github.com/multiformats/go-multiaddr"
    35  	manet "github.com/multiformats/go-multiaddr/net"
    36  	"github.com/pkg/errors"
    37  	types "github.com/prysmaticlabs/eth2-types"
    38  	"github.com/prysmaticlabs/go-bitfield"
    39  	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
    40  	"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers/peerdata"
    41  	"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers/scorers"
    42  	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
    43  	"github.com/prysmaticlabs/prysm/shared/interfaces"
    44  	"github.com/prysmaticlabs/prysm/shared/params"
    45  	"github.com/prysmaticlabs/prysm/shared/rand"
    46  	"github.com/prysmaticlabs/prysm/shared/timeutils"
    47  )
    48  
// Peer connection states, ordered from least to most connected.
const (
	// PeerDisconnected means there is no connection to the peer.
	PeerDisconnected peerdata.PeerConnectionState = iota
	// PeerDisconnecting means there is an on-going attempt to disconnect from the peer.
	PeerDisconnecting
	// PeerConnected means the peer has an active connection.
	PeerConnected
	// PeerConnecting means there is an on-going attempt to connect to the peer.
	PeerConnecting
)

const (
	// ColocationLimit restricts how many peer identities we can see from a single ip or ipv6 subnet.
	ColocationLimit = 5

	// maxLimitBuffer is an additional buffer beyond the current peer limit, from which
	// we can store the relevant peer statuses (e.g. of recently disconnected peers).
	maxLimitBuffer = 150

	// InboundRatio is the proportion of our connected peer limit at which we will allow inbound peers.
	InboundRatio = float64(0.8)

	// MinBackOffDuration minimum amount (in milliseconds) to wait before peer is re-dialed.
	// When node and peer are dialing each other simultaneously connection may fail. In order, to break
	// of constant dialing, peer is assigned some backoff period, and only dialed again once that backoff is up.
	MinBackOffDuration = 100
	// MaxBackOffDuration maximum amount (in milliseconds) to wait before peer is re-dialed.
	MaxBackOffDuration = 5000
)

// ErrNoPeerStatus is returned when there is a map entry for a given peer but there is no chain
// status for that peer. This should happen in rare circumstances only, but is a very possible
// scenario in a chaotic and adversarial network.
var ErrNoPeerStatus = errors.New("no chain status for peer")
    82  
// Status is the structure holding the peer status information.
type Status struct {
	ctx context.Context
	// scorers manages all registered peer scoring services.
	scorers *scorers.Service
	// store holds per-peer data; it carries its own lock, used by all methods below.
	store *peerdata.Store
	// ipTracker counts peer identities seen per IP string, for colocation limiting.
	ipTracker map[string]uint64
	// rand is a deterministic generator used to compute dial backoff periods.
	rand *rand.Rand
}
    91  
// StatusConfig represents peer status service params.
type StatusConfig struct {
	// PeerLimit specifies maximum amount of concurrent peers that are expected to be connect to the node.
	PeerLimit int
	// ScorerParams holds peer scorer configuration params.
	ScorerParams *scorers.Config
}
    99  
   100  // NewStatus creates a new status entity.
   101  func NewStatus(ctx context.Context, config *StatusConfig) *Status {
   102  	store := peerdata.NewStore(ctx, &peerdata.StoreConfig{
   103  		MaxPeers: maxLimitBuffer + config.PeerLimit,
   104  	})
   105  	return &Status{
   106  		ctx:       ctx,
   107  		store:     store,
   108  		scorers:   scorers.NewService(ctx, store, config.ScorerParams),
   109  		ipTracker: map[string]uint64{},
   110  		// Random generator used to calculate dial backoff period.
   111  		// It is ok to use deterministic generator, no need for true entropy.
   112  		rand: rand.NewDeterministicGenerator(),
   113  	}
   114  }
   115  
// Scorers exposes peer scoring management service.
func (p *Status) Scorers() *scorers.Service {
	return p.scorers
}
   120  
// MaxPeerLimit returns the max peer limit stored in the current peer store.
// This includes the maxLimitBuffer headroom beyond the configured peer limit.
func (p *Status) MaxPeerLimit() int {
	return p.store.Config().MaxPeers
}
   125  
   126  // Add adds a peer.
   127  // If a peer already exists with this ID its address and direction are updated with the supplied data.
   128  func (p *Status) Add(record *enr.Record, pid peer.ID, address ma.Multiaddr, direction network.Direction) {
   129  	p.store.Lock()
   130  	defer p.store.Unlock()
   131  
   132  	if peerData, ok := p.store.PeerData(pid); ok {
   133  		// Peer already exists, just update its address info.
   134  		prevAddress := peerData.Address
   135  		peerData.Address = address
   136  		peerData.Direction = direction
   137  		if record != nil {
   138  			peerData.Enr = record
   139  		}
   140  		if !sameIP(prevAddress, address) {
   141  			p.addIpToTracker(pid)
   142  		}
   143  		return
   144  	}
   145  	peerData := &peerdata.PeerData{
   146  		Address:   address,
   147  		Direction: direction,
   148  		// Peers start disconnected; state will be updated when the handshake process begins.
   149  		ConnState: PeerDisconnected,
   150  	}
   151  	if record != nil {
   152  		peerData.Enr = record
   153  	}
   154  	p.store.SetPeerData(pid, peerData)
   155  	p.addIpToTracker(pid)
   156  }
   157  
   158  // Address returns the multiaddress of the given remote peer.
   159  // This will error if the peer does not exist.
   160  func (p *Status) Address(pid peer.ID) (ma.Multiaddr, error) {
   161  	p.store.RLock()
   162  	defer p.store.RUnlock()
   163  
   164  	if peerData, ok := p.store.PeerData(pid); ok {
   165  		return peerData.Address, nil
   166  	}
   167  	return nil, peerdata.ErrPeerUnknown
   168  }
   169  
   170  // Direction returns the direction of the given remote peer.
   171  // This will error if the peer does not exist.
   172  func (p *Status) Direction(pid peer.ID) (network.Direction, error) {
   173  	p.store.RLock()
   174  	defer p.store.RUnlock()
   175  
   176  	if peerData, ok := p.store.PeerData(pid); ok {
   177  		return peerData.Direction, nil
   178  	}
   179  	return network.DirUnknown, peerdata.ErrPeerUnknown
   180  }
   181  
   182  // ENR returns the enr for the corresponding peer id.
   183  func (p *Status) ENR(pid peer.ID) (*enr.Record, error) {
   184  	p.store.RLock()
   185  	defer p.store.RUnlock()
   186  
   187  	if peerData, ok := p.store.PeerData(pid); ok {
   188  		return peerData.Enr, nil
   189  	}
   190  	return nil, peerdata.ErrPeerUnknown
   191  }
   192  
// SetChainState sets the chain state of the given remote peer.
// The update is delegated to the peer-status scorer, which owns chain-state storage.
func (p *Status) SetChainState(pid peer.ID, chainState *pb.Status) {
	p.scorers.PeerStatusScorer().SetPeerStatus(pid, chainState, nil)
}
   197  
   198  // ChainState gets the chain state of the given remote peer.
   199  // This will error if the peer does not exist.
   200  // This will error if there is no known chain state for the peer.
   201  func (p *Status) ChainState(pid peer.ID) (*pb.Status, error) {
   202  	s, err := p.scorers.PeerStatusScorer().PeerStatus(pid)
   203  	if err != nil {
   204  		return nil, err
   205  	}
   206  	if s == nil {
   207  		return nil, ErrNoPeerStatus
   208  	}
   209  	return s, nil
   210  }
   211  
   212  // IsActive checks if a peers is active and returns the result appropriately.
   213  func (p *Status) IsActive(pid peer.ID) bool {
   214  	p.store.RLock()
   215  	defer p.store.RUnlock()
   216  
   217  	peerData, ok := p.store.PeerData(pid)
   218  	return ok && (peerData.ConnState == PeerConnected || peerData.ConnState == PeerConnecting)
   219  }
   220  
   221  // IsAboveInboundLimit checks if we are above our current inbound
   222  // peer limit.
   223  func (p *Status) IsAboveInboundLimit() bool {
   224  	p.store.RLock()
   225  	defer p.store.RUnlock()
   226  	totalInbound := 0
   227  	for _, peerData := range p.store.Peers() {
   228  		if peerData.ConnState == PeerConnected &&
   229  			peerData.Direction == network.DirInbound {
   230  			totalInbound += 1
   231  		}
   232  	}
   233  	inboundLimit := int(float64(p.ConnectedPeerLimit()) * InboundRatio)
   234  	return totalInbound > inboundLimit
   235  }
   236  
   237  // InboundLimit returns the current inbound
   238  // peer limit.
   239  func (p *Status) InboundLimit() int {
   240  	p.store.RLock()
   241  	defer p.store.RUnlock()
   242  	return int(float64(p.ConnectedPeerLimit()) * InboundRatio)
   243  }
   244  
// SetMetadata sets the metadata of the given remote peer.
// A record is created for the peer if one does not yet exist.
func (p *Status) SetMetadata(pid peer.ID, metaData interfaces.Metadata) {
	p.store.Lock()
	defer p.store.Unlock()

	peerData := p.store.PeerDataGetOrCreate(pid)
	peerData.MetaData = metaData
}
   253  
   254  // Metadata returns a copy of the metadata corresponding to the provided
   255  // peer id.
   256  func (p *Status) Metadata(pid peer.ID) (interfaces.Metadata, error) {
   257  	p.store.RLock()
   258  	defer p.store.RUnlock()
   259  
   260  	if peerData, ok := p.store.PeerData(pid); ok {
   261  		if peerData.MetaData == nil || peerData.MetaData.IsNil() {
   262  			return nil, nil
   263  		}
   264  		return peerData.MetaData.Copy(), nil
   265  	}
   266  	return nil, peerdata.ErrPeerUnknown
   267  }
   268  
   269  // CommitteeIndices retrieves the committee subnets the peer is subscribed to.
   270  func (p *Status) CommitteeIndices(pid peer.ID) ([]uint64, error) {
   271  	p.store.RLock()
   272  	defer p.store.RUnlock()
   273  
   274  	if peerData, ok := p.store.PeerData(pid); ok {
   275  		if peerData.Enr == nil || peerData.MetaData == nil || peerData.MetaData.IsNil() {
   276  			return []uint64{}, nil
   277  		}
   278  		return indicesFromBitfield(peerData.MetaData.AttnetsBitfield()), nil
   279  	}
   280  	return nil, peerdata.ErrPeerUnknown
   281  }
   282  
   283  // SubscribedToSubnet retrieves the peers subscribed to the given
   284  // committee subnet.
   285  func (p *Status) SubscribedToSubnet(index uint64) []peer.ID {
   286  	p.store.RLock()
   287  	defer p.store.RUnlock()
   288  
   289  	peers := make([]peer.ID, 0)
   290  	for pid, peerData := range p.store.Peers() {
   291  		// look at active peers
   292  		connectedStatus := peerData.ConnState == PeerConnecting || peerData.ConnState == PeerConnected
   293  		if connectedStatus && peerData.MetaData != nil && !peerData.MetaData.IsNil() && peerData.MetaData.AttnetsBitfield() != nil {
   294  			indices := indicesFromBitfield(peerData.MetaData.AttnetsBitfield())
   295  			for _, idx := range indices {
   296  				if idx == index {
   297  					peers = append(peers, pid)
   298  					break
   299  				}
   300  			}
   301  		}
   302  	}
   303  	return peers
   304  }
   305  
// SetConnectionState sets the connection state of the given remote peer.
// A record is created for the peer if one does not yet exist.
func (p *Status) SetConnectionState(pid peer.ID, state peerdata.PeerConnectionState) {
	p.store.Lock()
	defer p.store.Unlock()

	peerData := p.store.PeerDataGetOrCreate(pid)
	peerData.ConnState = state
}
   314  
   315  // ConnectionState gets the connection state of the given remote peer.
   316  // This will error if the peer does not exist.
   317  func (p *Status) ConnectionState(pid peer.ID) (peerdata.PeerConnectionState, error) {
   318  	p.store.RLock()
   319  	defer p.store.RUnlock()
   320  
   321  	if peerData, ok := p.store.PeerData(pid); ok {
   322  		return peerData.ConnState, nil
   323  	}
   324  	return PeerDisconnected, peerdata.ErrPeerUnknown
   325  }
   326  
   327  // ChainStateLastUpdated gets the last time the chain state of the given remote peer was updated.
   328  // This will error if the peer does not exist.
   329  func (p *Status) ChainStateLastUpdated(pid peer.ID) (time.Time, error) {
   330  	p.store.RLock()
   331  	defer p.store.RUnlock()
   332  
   333  	if peerData, ok := p.store.PeerData(pid); ok {
   334  		return peerData.ChainStateLastUpdated, nil
   335  	}
   336  	return timeutils.Now(), peerdata.ErrPeerUnknown
   337  }
   338  
// IsBad states if the peer is to be considered bad (by *any* of the registered scorers).
// A peer is also considered bad when its IP exceeds the colocation limit.
// If the peer is unknown this will return `false`, which makes using this function easier than returning an error.
func (p *Status) IsBad(pid peer.ID) bool {
	return p.isfromBadIP(pid) || p.scorers.IsBadPeer(pid)
}
   344  
   345  // NextValidTime gets the earliest possible time it is to contact/dial
   346  // a peer again. This is used to back-off from peers in the event
   347  // they are 'full' or have banned us.
   348  func (p *Status) NextValidTime(pid peer.ID) (time.Time, error) {
   349  	p.store.RLock()
   350  	defer p.store.RUnlock()
   351  
   352  	if peerData, ok := p.store.PeerData(pid); ok {
   353  		return peerData.NextValidTime, nil
   354  	}
   355  	return timeutils.Now(), peerdata.ErrPeerUnknown
   356  }
   357  
// SetNextValidTime sets the earliest possible time we are
// able to contact this peer again.
// A record is created for the peer if one does not yet exist.
func (p *Status) SetNextValidTime(pid peer.ID, nextTime time.Time) {
	p.store.Lock()
	defer p.store.Unlock()

	peerData := p.store.PeerDataGetOrCreate(pid)
	peerData.NextValidTime = nextTime
}
   367  
   368  // RandomizeBackOff adds extra backoff period during which peer will not be dialed.
   369  func (p *Status) RandomizeBackOff(pid peer.ID) {
   370  	p.store.Lock()
   371  	defer p.store.Unlock()
   372  
   373  	peerData := p.store.PeerDataGetOrCreate(pid)
   374  
   375  	// No need to add backoff period, if the previous one hasn't expired yet.
   376  	if !time.Now().After(peerData.NextValidTime) {
   377  		return
   378  	}
   379  
   380  	duration := time.Duration(math.Max(MinBackOffDuration, float64(p.rand.Intn(MaxBackOffDuration)))) * time.Millisecond
   381  	peerData.NextValidTime = time.Now().Add(duration)
   382  }
   383  
   384  // IsReadyToDial checks where the given peer is ready to be
   385  // dialed again.
   386  func (p *Status) IsReadyToDial(pid peer.ID) bool {
   387  	p.store.RLock()
   388  	defer p.store.RUnlock()
   389  
   390  	if peerData, ok := p.store.PeerData(pid); ok {
   391  		timeIsZero := peerData.NextValidTime.IsZero()
   392  		isInvalidTime := peerData.NextValidTime.After(time.Now())
   393  		return timeIsZero || !isInvalidTime
   394  	}
   395  	// If no record exists, we don't restrict dials to the
   396  	// peer.
   397  	return true
   398  }
   399  
   400  // Connecting returns the peers that are connecting.
   401  func (p *Status) Connecting() []peer.ID {
   402  	p.store.RLock()
   403  	defer p.store.RUnlock()
   404  	peers := make([]peer.ID, 0)
   405  	for pid, peerData := range p.store.Peers() {
   406  		if peerData.ConnState == PeerConnecting {
   407  			peers = append(peers, pid)
   408  		}
   409  	}
   410  	return peers
   411  }
   412  
   413  // Connected returns the peers that are connected.
   414  func (p *Status) Connected() []peer.ID {
   415  	p.store.RLock()
   416  	defer p.store.RUnlock()
   417  	peers := make([]peer.ID, 0)
   418  	for pid, peerData := range p.store.Peers() {
   419  		if peerData.ConnState == PeerConnected {
   420  			peers = append(peers, pid)
   421  		}
   422  	}
   423  	return peers
   424  }
   425  
   426  // Inbound returns the current batch of inbound peers.
   427  func (p *Status) Inbound() []peer.ID {
   428  	p.store.RLock()
   429  	defer p.store.RUnlock()
   430  	peers := make([]peer.ID, 0)
   431  	for pid, peerData := range p.store.Peers() {
   432  		if peerData.Direction == network.DirInbound {
   433  			peers = append(peers, pid)
   434  		}
   435  	}
   436  	return peers
   437  }
   438  
   439  // InboundConnected returns the current batch of inbound peers that are connected.
   440  func (p *Status) InboundConnected() []peer.ID {
   441  	p.store.RLock()
   442  	defer p.store.RUnlock()
   443  	peers := make([]peer.ID, 0)
   444  	for pid, peerData := range p.store.Peers() {
   445  		if peerData.ConnState == PeerConnected && peerData.Direction == network.DirInbound {
   446  			peers = append(peers, pid)
   447  		}
   448  	}
   449  	return peers
   450  }
   451  
   452  // Outbound returns the current batch of outbound peers.
   453  func (p *Status) Outbound() []peer.ID {
   454  	p.store.RLock()
   455  	defer p.store.RUnlock()
   456  	peers := make([]peer.ID, 0)
   457  	for pid, peerData := range p.store.Peers() {
   458  		if peerData.Direction == network.DirOutbound {
   459  			peers = append(peers, pid)
   460  		}
   461  	}
   462  	return peers
   463  }
   464  
   465  // OutboundConnected returns the current batch of outbound peers that are connected.
   466  func (p *Status) OutboundConnected() []peer.ID {
   467  	p.store.RLock()
   468  	defer p.store.RUnlock()
   469  	peers := make([]peer.ID, 0)
   470  	for pid, peerData := range p.store.Peers() {
   471  		if peerData.ConnState == PeerConnected && peerData.Direction == network.DirOutbound {
   472  			peers = append(peers, pid)
   473  		}
   474  	}
   475  	return peers
   476  }
   477  
   478  // Active returns the peers that are connecting or connected.
   479  func (p *Status) Active() []peer.ID {
   480  	p.store.RLock()
   481  	defer p.store.RUnlock()
   482  	peers := make([]peer.ID, 0)
   483  	for pid, peerData := range p.store.Peers() {
   484  		if peerData.ConnState == PeerConnecting || peerData.ConnState == PeerConnected {
   485  			peers = append(peers, pid)
   486  		}
   487  	}
   488  	return peers
   489  }
   490  
   491  // Disconnecting returns the peers that are disconnecting.
   492  func (p *Status) Disconnecting() []peer.ID {
   493  	p.store.RLock()
   494  	defer p.store.RUnlock()
   495  	peers := make([]peer.ID, 0)
   496  	for pid, peerData := range p.store.Peers() {
   497  		if peerData.ConnState == PeerDisconnecting {
   498  			peers = append(peers, pid)
   499  		}
   500  	}
   501  	return peers
   502  }
   503  
   504  // Disconnected returns the peers that are disconnected.
   505  func (p *Status) Disconnected() []peer.ID {
   506  	p.store.RLock()
   507  	defer p.store.RUnlock()
   508  	peers := make([]peer.ID, 0)
   509  	for pid, peerData := range p.store.Peers() {
   510  		if peerData.ConnState == PeerDisconnected {
   511  			peers = append(peers, pid)
   512  		}
   513  	}
   514  	return peers
   515  }
   516  
   517  // Inactive returns the peers that are disconnecting or disconnected.
   518  func (p *Status) Inactive() []peer.ID {
   519  	p.store.RLock()
   520  	defer p.store.RUnlock()
   521  	peers := make([]peer.ID, 0)
   522  	for pid, peerData := range p.store.Peers() {
   523  		if peerData.ConnState == PeerDisconnecting || peerData.ConnState == PeerDisconnected {
   524  			peers = append(peers, pid)
   525  		}
   526  	}
   527  	return peers
   528  }
   529  
// Bad returns the peers that are bad, as judged by the bad-responses scorer.
func (p *Status) Bad() []peer.ID {
	return p.scorers.BadResponsesScorer().BadPeers()
}
   534  
   535  // All returns all the peers regardless of state.
   536  func (p *Status) All() []peer.ID {
   537  	p.store.RLock()
   538  	defer p.store.RUnlock()
   539  	pids := make([]peer.ID, 0, len(p.store.Peers()))
   540  	for pid := range p.store.Peers() {
   541  		pids = append(pids, pid)
   542  	}
   543  	return pids
   544  }
   545  
   546  // Prune clears out and removes outdated and disconnected peers.
   547  func (p *Status) Prune() {
   548  	p.store.Lock()
   549  	defer p.store.Unlock()
   550  
   551  	// Exit early if there is nothing to prune.
   552  	if len(p.store.Peers()) <= p.store.Config().MaxPeers {
   553  		return
   554  	}
   555  
   556  	notBadPeer := func(peerData *peerdata.PeerData) bool {
   557  		return peerData.BadResponses < p.scorers.BadResponsesScorer().Params().Threshold
   558  	}
   559  	type peerResp struct {
   560  		pid     peer.ID
   561  		badResp int
   562  	}
   563  	peersToPrune := make([]*peerResp, 0)
   564  	// Select disconnected peers with a smaller bad response count.
   565  	for pid, peerData := range p.store.Peers() {
   566  		if peerData.ConnState == PeerDisconnected && notBadPeer(peerData) {
   567  			peersToPrune = append(peersToPrune, &peerResp{
   568  				pid:     pid,
   569  				badResp: peerData.BadResponses,
   570  			})
   571  		}
   572  	}
   573  
   574  	// Sort peers in ascending order, so the peers with the
   575  	// least amount of bad responses are pruned first. This
   576  	// is to protect the node from malicious/lousy peers so
   577  	// that their memory is still kept.
   578  	sort.Slice(peersToPrune, func(i, j int) bool {
   579  		return peersToPrune[i].badResp < peersToPrune[j].badResp
   580  	})
   581  
   582  	limitDiff := len(p.store.Peers()) - p.store.Config().MaxPeers
   583  	if limitDiff > len(peersToPrune) {
   584  		limitDiff = len(peersToPrune)
   585  	}
   586  
   587  	peersToPrune = peersToPrune[:limitDiff]
   588  
   589  	// Delete peers from map.
   590  	for _, peerData := range peersToPrune {
   591  		p.store.DeletePeerData(peerData.pid)
   592  	}
   593  	p.tallyIPTracker()
   594  }
   595  
// BestFinalized returns the highest finalized epoch equal to or higher than ours that is agreed
// upon by the majority of peers. This method may not return the absolute highest finalized, but
// the finalized epoch in which most peers can serve blocks (plurality voting).
// Ideally, all peers would be reporting the same finalized epoch but some may be behind due to their
// own latency, or because of their finalized epoch at the time we queried them.
// Returns epoch number and list of peers that are at or beyond that epoch.
func (p *Status) BestFinalized(maxPeers int, ourFinalizedEpoch types.Epoch) (types.Epoch, []peer.ID) {
	connected := p.Connected()
	finalizedEpochVotes := make(map[types.Epoch]uint64)
	pidEpoch := make(map[peer.ID]types.Epoch, len(connected))
	pidHead := make(map[peer.ID]types.Slot, len(connected))
	potentialPIDs := make([]peer.ID, 0, len(connected))
	// Tally one vote per connected peer whose reported finalized epoch is
	// at least our own; peers with no/failed chain state are skipped.
	for _, pid := range connected {
		peerChainState, err := p.ChainState(pid)
		if err == nil && peerChainState != nil && peerChainState.FinalizedEpoch >= ourFinalizedEpoch {
			finalizedEpochVotes[peerChainState.FinalizedEpoch]++
			pidEpoch[pid] = peerChainState.FinalizedEpoch
			potentialPIDs = append(potentialPIDs, pid)
			pidHead[pid] = peerChainState.HeadSlot
		}
	}

	// Select the target epoch, which is the epoch most peers agree upon.
	// Ties are broken in favor of the higher epoch.
	var targetEpoch types.Epoch
	var mostVotes uint64
	for epoch, count := range finalizedEpochVotes {
		if count > mostVotes || (count == mostVotes && epoch > targetEpoch) {
			mostVotes = count
			targetEpoch = epoch
		}
	}

	// Sort PIDs by finalized epoch, in decreasing order.
	// Peers on the same epoch are ordered by head slot, also decreasing.
	sort.Slice(potentialPIDs, func(i, j int) bool {
		if pidEpoch[potentialPIDs[i]] == pidEpoch[potentialPIDs[j]] {
			return pidHead[potentialPIDs[i]] > pidHead[potentialPIDs[j]]
		}
		return pidEpoch[potentialPIDs[i]] > pidEpoch[potentialPIDs[j]]
	})

	// Trim potential peers to those on or after target epoch.
	// The slice is sorted, so the first peer below target marks the cut.
	for i, pid := range potentialPIDs {
		if pidEpoch[pid] < targetEpoch {
			potentialPIDs = potentialPIDs[:i]
			break
		}
	}

	// Trim potential peers to at most maxPeers.
	if len(potentialPIDs) > maxPeers {
		potentialPIDs = potentialPIDs[:maxPeers]
	}

	return targetEpoch, potentialPIDs
}
   651  
// BestNonFinalized returns the highest known epoch, higher than ours,
// and is shared by at least minPeers.
func (p *Status) BestNonFinalized(minPeers int, ourHeadEpoch types.Epoch) (types.Epoch, []peer.ID) {
	connected := p.Connected()
	epochVotes := make(map[types.Epoch]uint64)
	pidEpoch := make(map[peer.ID]types.Epoch, len(connected))
	pidHead := make(map[peer.ID]types.Slot, len(connected))
	potentialPIDs := make([]peer.ID, 0, len(connected))

	// Tally one vote per connected peer whose reported head slot is
	// strictly ahead of our own head slot.
	ourHeadSlot := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(ourHeadEpoch))
	for _, pid := range connected {
		peerChainState, err := p.ChainState(pid)
		if err == nil && peerChainState != nil && peerChainState.HeadSlot > ourHeadSlot {
			epoch := helpers.SlotToEpoch(peerChainState.HeadSlot)
			epochVotes[epoch]++
			pidEpoch[pid] = epoch
			pidHead[pid] = peerChainState.HeadSlot
			potentialPIDs = append(potentialPIDs, pid)
		}
	}

	// Select the target epoch, which has enough peers' votes (>= minPeers).
	// Among qualifying epochs, the highest one wins. If none qualifies,
	// targetEpoch stays at its zero value.
	var targetEpoch types.Epoch
	for epoch, votes := range epochVotes {
		if votes >= uint64(minPeers) && targetEpoch < epoch {
			targetEpoch = epoch
		}
	}

	// Sort PIDs by head slot, in decreasing order.
	sort.Slice(potentialPIDs, func(i, j int) bool {
		return pidHead[potentialPIDs[i]] > pidHead[potentialPIDs[j]]
	})

	// Trim potential peers to those on or after target epoch.
	// The slice is sorted, so the first peer below target marks the cut.
	for i, pid := range potentialPIDs {
		if pidEpoch[pid] < targetEpoch {
			potentialPIDs = potentialPIDs[:i]
			break
		}
	}

	return targetEpoch, potentialPIDs
}
   696  
// PeersToPrune selects the most suitable inbound peers
// to disconnect the host peer from. As of this moment
// the pruning relies on simple heuristics such as
// bad response count. In the future scoring will be used
// to determine the most suitable peers to take out.
func (p *Status) PeersToPrune() []peer.ID {
	// NOTE: these helpers take the store's read lock themselves, so they
	// must be called before we acquire the write lock below.
	connLimit := p.ConnectedPeerLimit()
	inBoundLimit := p.InboundLimit()
	activePeers := p.Active()
	numInboundPeers := len(p.InboundConnected())
	// Exit early if we are still below our max
	// limit.
	if len(activePeers) <= int(connLimit) {
		return []peer.ID{}
	}
	p.store.Lock()
	defer p.store.Unlock()

	type peerResp struct {
		pid     peer.ID
		badResp int
	}
	peersToPrune := make([]*peerResp, 0)
	// Select connected and inbound peers to prune.
	// Outbound peers are never pruned here.
	for pid, peerData := range p.store.Peers() {
		if peerData.ConnState == PeerConnected &&
			peerData.Direction == network.DirInbound {
			peersToPrune = append(peersToPrune, &peerResp{
				pid:     pid,
				badResp: peerData.BadResponses,
			})
		}
	}

	// Sort in descending order to favour pruning peers with a
	// higher bad response count.
	sort.Slice(peersToPrune, func(i, j int) bool {
		return peersToPrune[i].badResp > peersToPrune[j].badResp
	})

	// Determine amount of peers to prune using our
	// max connection limit.
	amountToPrune := len(activePeers) - int(connLimit)

	// Also check for inbound peers above our limit.
	excessInbound := 0
	if numInboundPeers > inBoundLimit {
		excessInbound = numInboundPeers - inBoundLimit
	}
	// Prune the largest amount between excess peers and
	// excess inbound peers.
	if excessInbound > amountToPrune {
		amountToPrune = excessInbound
	}
	// amountToPrune may exceed the candidate count, in which case every
	// candidate is pruned.
	if amountToPrune < len(peersToPrune) {
		peersToPrune = peersToPrune[:amountToPrune]
	}
	ids := make([]peer.ID, 0, len(peersToPrune))
	for _, pr := range peersToPrune {
		ids = append(ids, pr.pid)
	}
	return ids
}
   760  
   761  // HighestEpoch returns the highest epoch reported epoch amongst peers.
   762  func (p *Status) HighestEpoch() types.Epoch {
   763  	p.store.RLock()
   764  	defer p.store.RUnlock()
   765  	var highestSlot types.Slot
   766  	for _, peerData := range p.store.Peers() {
   767  		if peerData != nil && peerData.ChainState != nil && peerData.ChainState.HeadSlot > highestSlot {
   768  			highestSlot = peerData.ChainState.HeadSlot
   769  		}
   770  	}
   771  	return helpers.SlotToEpoch(highestSlot)
   772  }
   773  
   774  // ConnectedPeerLimit returns the peer limit of
   775  // concurrent peers connected to the beacon-node.
   776  func (p *Status) ConnectedPeerLimit() uint64 {
   777  	maxLim := p.MaxPeerLimit()
   778  	if maxLim <= maxLimitBuffer {
   779  		return 0
   780  	}
   781  	return uint64(maxLim) - maxLimitBuffer
   782  }
   783  
   784  func (p *Status) isfromBadIP(pid peer.ID) bool {
   785  	p.store.RLock()
   786  	defer p.store.RUnlock()
   787  
   788  	peerData, ok := p.store.PeerData(pid)
   789  	if !ok {
   790  		return false
   791  	}
   792  	if peerData.Address == nil {
   793  		return false
   794  	}
   795  	ip, err := manet.ToIP(peerData.Address)
   796  	if err != nil {
   797  		return true
   798  	}
   799  	if val, ok := p.ipTracker[ip.String()]; ok {
   800  		if val > ColocationLimit {
   801  			return true
   802  		}
   803  	}
   804  	return false
   805  }
   806  
   807  func (p *Status) addIpToTracker(pid peer.ID) {
   808  	data, ok := p.store.PeerData(pid)
   809  	if !ok {
   810  		return
   811  	}
   812  	if data.Address == nil {
   813  		return
   814  	}
   815  	ip, err := manet.ToIP(data.Address)
   816  	if err != nil {
   817  		// Should never happen, it is
   818  		// assumed every IP coming in
   819  		// is a valid ip.
   820  		return
   821  	}
   822  	// Ignore loopback addresses.
   823  	if ip.IsLoopback() {
   824  		return
   825  	}
   826  	stringIP := ip.String()
   827  	p.ipTracker[stringIP] += 1
   828  }
   829  
   830  func (p *Status) tallyIPTracker() {
   831  	tracker := map[string]uint64{}
   832  	// Iterate through all peers.
   833  	for _, peerData := range p.store.Peers() {
   834  		if peerData.Address == nil {
   835  			continue
   836  		}
   837  		ip, err := manet.ToIP(peerData.Address)
   838  		if err != nil {
   839  			// Should never happen, it is
   840  			// assumed every IP coming in
   841  			// is a valid ip.
   842  			continue
   843  		}
   844  		stringIP := ip.String()
   845  		tracker[stringIP] += 1
   846  	}
   847  	p.ipTracker = tracker
   848  }
   849  
   850  func sameIP(firstAddr, secondAddr ma.Multiaddr) bool {
   851  	// Exit early if we do get nil multiaddresses
   852  	if firstAddr == nil || secondAddr == nil {
   853  		return false
   854  	}
   855  	firstIP, err := manet.ToIP(firstAddr)
   856  	if err != nil {
   857  		return false
   858  	}
   859  	secondIP, err := manet.ToIP(secondAddr)
   860  	if err != nil {
   861  		return false
   862  	}
   863  	return firstIP.Equal(secondIP)
   864  }
   865  
   866  func indicesFromBitfield(bitV bitfield.Bitvector64) []uint64 {
   867  	committeeIdxs := make([]uint64, 0, bitV.Count())
   868  	for i := uint64(0); i < 64; i++ {
   869  		if bitV.BitAt(i) {
   870  			committeeIdxs = append(committeeIdxs, i)
   871  		}
   872  	}
   873  	return committeeIdxs
   874  }