github.com/onflow/flow-go@v0.35.7-crescendo-preview.23-atree-inlining/network/p2p/connection/connector.go (about)

     1  package connection
     2  
     3  import (
     4  	"context"
     5  
     6  	"github.com/libp2p/go-libp2p/core/peer"
     7  	"github.com/rs/zerolog"
     8  
     9  	"github.com/onflow/flow-go/network/internal/p2putils"
    10  	"github.com/onflow/flow-go/network/p2p"
    11  	p2plogging "github.com/onflow/flow-go/network/p2p/logging"
    12  	"github.com/onflow/flow-go/utils/logging"
    13  	"github.com/onflow/flow-go/utils/rand"
    14  )
    15  
const (
	// PruningEnabled is a boolean flag to enable pruning of connections to peers that are not part of
	// the explicit update list.
	// If set to true, the connector will prune connections to peers that are not part of the explicit update list.
	// Passed as PeerUpdaterConfig.PruneConnections.
	PruningEnabled = true

	// PruningDisabled is a boolean flag to disable pruning of connections to peers that are not part of
	// the explicit update list.
	// If set to false, the connector will not prune connections to peers that are not part of the explicit update list.
	// Passed as PeerUpdaterConfig.PruneConnections.
	PruningDisabled = false
)
    27  
// PeerUpdater is a connector that connects to a list of peers and disconnects from any other connection that the libp2p node might have.
type PeerUpdater struct {
	// connector performs the actual dialing of peers fed to it over a channel.
	connector p2p.Connector
	// host provides connection state queries (IsConnectedTo, Connections, ClosePeer, ...).
	host p2p.ConnectorHost
	// log is the component-scoped logger (tagged "peer-updater" in NewPeerUpdater).
	log zerolog.Logger
	// pruneConnections, when true, causes UpdatePeers to disconnect from peers
	// that are not in the explicit update list.
	pruneConnections bool
}
    35  
// PeerUpdaterConfig is the configuration for the libp2p based connector.
type PeerUpdaterConfig struct {
	// PruneConnections is a boolean flag to enable pruning of connections to peers that are not part of the explicit update list.
	PruneConnections bool

	// Logger is the logger to be used by the connector.
	Logger zerolog.Logger

	// Host is the libp2p host to be used by the connector.
	Host p2p.ConnectorHost

	// Connector is the connector used to dial the peers supplied by the updater.
	Connector p2p.Connector
}
    50  
    51  var _ p2p.PeerUpdater = (*PeerUpdater)(nil)
    52  
    53  // NewPeerUpdater creates a new libp2p based connector
    54  // Args:
    55  //   - cfg: configuration for the connector
    56  //
    57  // Returns:
    58  //   - *PeerUpdater: a new libp2p based connector
    59  //   - error: an error if there is any error while creating the connector. The errors are irrecoverable and unexpected.
    60  func NewPeerUpdater(cfg *PeerUpdaterConfig) (*PeerUpdater, error) {
    61  	libP2PConnector := &PeerUpdater{
    62  		log:              cfg.Logger.With().Str("component", "peer-updater").Logger(),
    63  		connector:        cfg.Connector,
    64  		host:             cfg.Host,
    65  		pruneConnections: cfg.PruneConnections,
    66  	}
    67  
    68  	return libP2PConnector, nil
    69  }
    70  
    71  // UpdatePeers is the implementation of the Connector.UpdatePeers function. It connects to all of the ids and
    72  // disconnects from any other connection that the libp2p node might have.
    73  func (l *PeerUpdater) UpdatePeers(ctx context.Context, peerIDs peer.IDSlice) {
    74  	// connect to each of the peer.AddrInfo in pInfos
    75  	l.connectToPeers(ctx, peerIDs)
    76  
    77  	if l.pruneConnections {
    78  		// disconnect from any other peers not in pInfos
    79  		// Note: by default almost on all roles, we run on a full topology,
    80  		// this trimming only affects evicted peers from protocol state.
    81  		l.pruneAllConnectionsExcept(peerIDs)
    82  	}
    83  }
    84  
    85  // connectToPeers connects each of the peer in pInfos
    86  func (l *PeerUpdater) connectToPeers(ctx context.Context, peerIDs peer.IDSlice) {
    87  
    88  	// create a channel of peer.AddrInfo as expected by the connector
    89  	peerCh := make(chan peer.AddrInfo, len(peerIDs))
    90  
    91  	// first shuffle, and then stuff all the peer.AddrInfo it into the channel.
    92  	// shuffling is not in place.
    93  	err := rand.Shuffle(uint(len(peerIDs)), func(i, j uint) {
    94  		peerIDs[i], peerIDs[j] = peerIDs[j], peerIDs[i]
    95  	})
    96  	if err != nil {
    97  		// this should never happen, but if it does, we should crash.
    98  		l.log.Fatal().Err(err).Msg("failed to shuffle peer IDs")
    99  	}
   100  
   101  	for _, peerID := range peerIDs {
   102  		if l.host.IsConnectedTo(peerID) {
   103  			l.log.Trace().Str("peer_id", p2plogging.PeerId(peerID)).Msg("already connected to peer, skipping connection")
   104  			continue
   105  		}
   106  		peerCh <- peer.AddrInfo{ID: peerID}
   107  	}
   108  
   109  	// close the channel to ensure Connect does not block
   110  	close(peerCh)
   111  
   112  	// ask the connector to connect to all the peers
   113  	l.connector.Connect(ctx, peerCh)
   114  }
   115  
   116  // pruneAllConnectionsExcept trims all connections of the node from peers not part of peerIDs.
   117  // A node would have created such extra connections earlier when the identity list may have been different, or
   118  // it may have been target of such connections from node which have now been excluded.
   119  func (l *PeerUpdater) pruneAllConnectionsExcept(peerIDs peer.IDSlice) {
   120  	// convert the peerInfos to a peer.ID -> bool map
   121  	peersToKeep := make(map[peer.ID]bool, len(peerIDs))
   122  	for _, pid := range peerIDs {
   123  		peersToKeep[pid] = true
   124  	}
   125  
   126  	// for each connection, check if that connection should be trimmed
   127  	for _, conn := range l.host.Connections() {
   128  
   129  		// get the remote peer ID for this connection
   130  		peerID := conn.RemotePeer()
   131  
   132  		// check if the peer ID is included in the current fanout
   133  		if peersToKeep[peerID] {
   134  			continue // skip pruning
   135  		}
   136  
   137  		peerInfo := l.host.PeerInfo(peerID)
   138  		lg := l.log.With().Str("remote_peer", peerInfo.String()).Logger()
   139  
   140  		// log the protected status of the connection
   141  		protected := l.host.IsProtected(peerID)
   142  		lg = lg.With().Bool("protected", protected).Logger()
   143  
   144  		// log if any stream is open on this connection.
   145  		flowStream := p2putils.FlowStream(conn)
   146  		if flowStream != nil {
   147  			lg = lg.With().Str("flow_stream", string(flowStream.Protocol())).Logger()
   148  		}
   149  		for _, stream := range conn.GetStreams() {
   150  			if err := stream.Close(); err != nil {
   151  				lg.Warn().Err(err).Msg("failed to close stream, when pruning connections")
   152  			}
   153  		}
   154  
   155  		// close the connection with the peer if it is not part of the current fanout
   156  		err := l.host.ClosePeer(peerID)
   157  		if err != nil {
   158  			// logging with suspicious level as failure to disconnect from a peer can be a security issue.
   159  			// e.g., failure to disconnect from a malicious peer can lead to a DoS attack.
   160  			lg.Error().
   161  				Bool(logging.KeySuspicious, true).
   162  				Err(err).Msg("failed to disconnect from peer")
   163  			continue
   164  		}
   165  		// logging with suspicious level as we only expect to disconnect from a peer if it is not part of the
   166  		// protocol state.
   167  		lg.Warn().
   168  			Bool(logging.KeySuspicious, true).
   169  			Msg("disconnected from peer")
   170  	}
   171  }