github.com/vipernet-xyz/tendermint-core@v0.32.0/p2p/transport.go

     1  package p2p
     2  
     3  import (
     4  	"context"
     5  	"fmt"
     6  	"net"
     7  	"time"
     8  
     9  	"github.com/pkg/errors"
    10  	"golang.org/x/net/netutil"
    11  
    12  	"github.com/tendermint/tendermint/crypto"
    13  	"github.com/tendermint/tendermint/p2p/conn"
    14  )
    15  
    16  const (
    17  	defaultDialTimeout      = time.Second
    18  	defaultFilterTimeout    = 5 * time.Second
    19  	defaultHandshakeTimeout = 3 * time.Second
    20  )
    21  
    22  // IPResolver is a behaviour subset of net.Resolver.
    23  type IPResolver interface {
    24  	LookupIPAddr(context.Context, string) ([]net.IPAddr, error)
    25  }
    26  
    27  // accept is the container to carry the upgraded connection and NodeInfo from an
    28  // asynchronously running routine to the Accept method.
    29  type accept struct {
    30  	netAddr  *NetAddress
    31  	conn     net.Conn
    32  	nodeInfo NodeInfo
    33  	err      error
    34  }
    35  
    36  // peerConfig is used to bundle data we need to fully set up a Peer with an
    37  // MConn, provided by the caller of Accept and Dial (currently the Switch). This
    38  // is a temporary measure until reactor setup is less dynamic and we introduce the
    39  // concept of PeerBehaviour to communicate about significant Peer lifecycle
    40  // events.
    41  // TODO(xla): Refactor out with more static Reactor setup and PeerBehaviour.
    42  type peerConfig struct {
    43  	chDescs     []*conn.ChannelDescriptor
    44  	onPeerError func(Peer, interface{})
    45  	outbound    bool
    46  	// isPersistent allows you to set a function which, given the socket address
    47  	// (for outbound peers) OR the self-reported address (for inbound peers), tells
    48  	// whether the peer is persistent or not.
    49  	isPersistent func(*NetAddress) bool
    50  	reactorsByCh map[byte]Reactor
    51  	metrics      *Metrics
    52  }
    53  
    54  // Transport emits and connects to Peers. The implementation of Peer is left to
    55  // the transport. Each transport is also responsible for filtering establishing
    56  // peers specific to its domain.
    57  type Transport interface {
    58  	// Listening address.
    59  	NetAddress() NetAddress
    60  
    61  	// Accept returns a newly connected Peer.
    62  	Accept(peerConfig) (Peer, error)
    63  
    64  	// Dial connects to and returns the Peer at the given address.
    65  	Dial(NetAddress, peerConfig) (Peer, error)
    66  
    67  	// Cleanup releases any resources associated with the Peer.
    68  	Cleanup(Peer)
    69  }
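
        // Editorial sketch, not part of the original file: one way a caller such as
        // the Switch might drive a Transport, assuming an already populated
        // peerConfig value. ErrTransportClosed signals a clean shutdown; any other
        // error is a rejected peer and the loop simply keeps accepting.
        //
        //	func acceptLoop(t Transport, cfg peerConfig) {
        //		for {
        //			p, err := t.Accept(cfg)
        //			if err != nil {
        //				if _, ok := err.(ErrTransportClosed); ok {
        //					return // transport was closed, stop accepting
        //				}
        //				continue // peer was rejected (filtered, duplicate, ...); keep going
        //			}
        //			// hand p over to the peer set / reactors
        //			_ = p
        //		}
        //	}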
    70  
    71  // transportLifecycle bundles the methods for callers to control start and stop
    72  // behaviour.
    73  type transportLifecycle interface {
    74  	Close() error
    75  	Listen(NetAddress) error
    76  }
    77  
    78  // ConnFilterFunc is to be implemented by filter hooks that run after a new
    79  // connection has been established. The set of existing connections is passed
    80  // along together with all resolved IPs for the new connection.
    81  type ConnFilterFunc func(ConnSet, net.Conn, []net.IP) error
    82  
    83  // ConnDuplicateIPFilter refuses an incoming connection if any of its resolved
    84  // IPs belongs to an already known connection.
    85  func ConnDuplicateIPFilter() ConnFilterFunc {
    86  	return func(cs ConnSet, c net.Conn, ips []net.IP) error {
    87  		for _, ip := range ips {
    88  			if cs.HasIP(ip) {
    89  				return ErrRejected{
    90  					conn:        c,
    91  					err:         fmt.Errorf("ip<%v> already connected", ip),
    92  					isDuplicate: true,
    93  				}
    94  			}
    95  		}
    96  
    97  		return nil
    98  	}
    99  }
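
        // Editorial sketch, not part of the original file: a custom ConnFilterFunc
        // follows the same shape. The allowlist filter below is hypothetical and only
        // illustrates how the ConnSet, net.Conn and resolved IPs are available to a
        // hook:
        //
        //	func ConnAllowlistFilter(allowed map[string]bool) ConnFilterFunc {
        //		return func(cs ConnSet, c net.Conn, ips []net.IP) error {
        //			for _, ip := range ips {
        //				if allowed[ip.String()] {
        //					return nil
        //				}
        //			}
        //			return ErrRejected{
        //				conn:       c,
        //				err:        fmt.Errorf("ip(s) %v not allowlisted", ips),
        //				isFiltered: true,
        //			}
        //		}
        //	}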
   100  
   101  // MultiplexTransportOption sets an optional parameter on the
   102  // MultiplexTransport.
   103  type MultiplexTransportOption func(*MultiplexTransport)
   104  
   105  // MultiplexTransportConnFilters sets the filters for rejecting new connections.
   106  func MultiplexTransportConnFilters(
   107  	filters ...ConnFilterFunc,
   108  ) MultiplexTransportOption {
   109  	return func(mt *MultiplexTransport) { mt.connFilters = filters }
   110  }
   111  
   112  // MultiplexTransportFilterTimeout sets the timeout to wait for filter calls to
   113  // return.
   114  func MultiplexTransportFilterTimeout(
   115  	timeout time.Duration,
   116  ) MultiplexTransportOption {
   117  	return func(mt *MultiplexTransport) { mt.filterTimeout = timeout }
   118  }
   119  
   120  // MultiplexTransportResolver sets the Resolver used for IP lookups; defaults to
   121  // net.DefaultResolver.
   122  func MultiplexTransportResolver(resolver IPResolver) MultiplexTransportOption {
   123  	return func(mt *MultiplexTransport) { mt.resolver = resolver }
   124  }
   125  
   126  // MultiplexTransportMaxIncomingConnections sets the maximum number of
   127  // simultaneous connections (incoming). Default: 0 (unlimited)
   128  func MultiplexTransportMaxIncomingConnections(n int) MultiplexTransportOption {
   129  	return func(mt *MultiplexTransport) { mt.maxIncomingConnections = n }
   130  }
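
        // Editorial note, not part of the original file: each option is a plain
        // function over *MultiplexTransport, so the caller that constructs the
        // transport applies them directly. For example, assuming mt is a
        // *MultiplexTransport created with NewMultiplexTransport (defined below):
        //
        //	for _, opt := range []MultiplexTransportOption{
        //		MultiplexTransportConnFilters(ConnDuplicateIPFilter()),
        //		MultiplexTransportMaxIncomingConnections(100),
        //	} {
        //		opt(mt)
        //	}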
   131  
   132  // MultiplexTransport accepts and dials tcp connections and upgrades them to
   133  // multiplexed peers.
   134  type MultiplexTransport struct {
   135  	netAddr                NetAddress
   136  	listener               net.Listener
   137  	maxIncomingConnections int // see MaxIncomingConnections
   138  
   139  	acceptc chan accept
   140  	closec  chan struct{}
   141  
   142  	// Lookup table for duplicate ip and id checks.
   143  	conns       ConnSet
   144  	connFilters []ConnFilterFunc
   145  
   146  	dialTimeout      time.Duration
   147  	filterTimeout    time.Duration
   148  	handshakeTimeout time.Duration
   149  	nodeInfo         NodeInfo
   150  	nodeKey          NodeKey
   151  	resolver         IPResolver
   152  
   153  	// TODO(xla): This config is still needed as we parameterise peerConn and
   154  	// peer currently. All relevant configuration should be refactored into options
   155  	// with sane defaults.
   156  	mConfig conn.MConnConfig
   157  }
   158  
   159  // Compile-time checks that MultiplexTransport satisfies Transport and transportLifecycle.
   160  var _ Transport = (*MultiplexTransport)(nil)
   161  var _ transportLifecycle = (*MultiplexTransport)(nil)
   162  
   163  // NewMultiplexTransport returns a TCP-based Transport that upgrades connections to multiplexed peers.
   164  func NewMultiplexTransport(
   165  	nodeInfo NodeInfo,
   166  	nodeKey NodeKey,
   167  	mConfig conn.MConnConfig,
   168  ) *MultiplexTransport {
   169  	return &MultiplexTransport{
   170  		acceptc:          make(chan accept),
   171  		closec:           make(chan struct{}),
   172  		dialTimeout:      defaultDialTimeout,
   173  		filterTimeout:    defaultFilterTimeout,
   174  		handshakeTimeout: defaultHandshakeTimeout,
   175  		mConfig:          mConfig,
   176  		nodeInfo:         nodeInfo,
   177  		nodeKey:          nodeKey,
   178  		conns:            NewConnSet(),
   179  		resolver:         net.DefaultResolver,
   180  	}
   181  }
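
        // Editorial sketch, not part of the original file: typical construction and
        // startup, assuming nodeInfo, nodeKey and mConfig are in scope and that the
        // IDAddressString and NewNetAddressString helpers behave as in upstream
        // Tendermint:
        //
        //	mt := NewMultiplexTransport(nodeInfo, nodeKey, mConfig)
        //	addr, err := NewNetAddressString(IDAddressString(nodeKey.ID(), "0.0.0.0:26656"))
        //	if err != nil {
        //		panic(err)
        //	}
        //	if err := mt.Listen(*addr); err != nil {
        //		panic(err)
        //	}
        //	defer mt.Close() // stops the accept loop and closes the listener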
   182  
   183  // NetAddress implements Transport.
   184  func (mt *MultiplexTransport) NetAddress() NetAddress {
   185  	return mt.netAddr
   186  }
   187  
   188  // Accept implements Transport.
   189  func (mt *MultiplexTransport) Accept(cfg peerConfig) (Peer, error) {
   190  	select {
   191  	// This case should never have any side-effectful/blocking operations to
   192  	// ensure that quality peers are ready to be used.
   193  	case a := <-mt.acceptc:
   194  		if a.err != nil {
   195  			return nil, a.err
   196  		}
   197  
   198  		cfg.outbound = false
   199  
   200  		return mt.wrapPeer(a.conn, a.nodeInfo, cfg, a.netAddr), nil
   201  	case <-mt.closec:
   202  		return nil, ErrTransportClosed{}
   203  	}
   204  }
   205  
   206  // Dial implements Transport.
   207  func (mt *MultiplexTransport) Dial(
   208  	addr NetAddress,
   209  	cfg peerConfig,
   210  ) (Peer, error) {
   211  	c, err := addr.DialTimeout(mt.dialTimeout)
   212  	if err != nil {
   213  		return nil, err
   214  	}
   215  
   216  	// TODO(xla): Evaluate if we should apply filters if we explicitly dial.
   217  	if err := mt.filterConn(c); err != nil {
   218  		return nil, err
   219  	}
   220  
   221  	secretConn, nodeInfo, err := mt.upgrade(c, &addr)
   222  	if err != nil {
   223  		return nil, err
   224  	}
   225  
   226  	cfg.outbound = true
   227  
   228  	p := mt.wrapPeer(secretConn, nodeInfo, cfg, &addr)
   229  
   230  	return p, nil
   231  }
   232  
   233  // Close implements transportLifecycle.
   234  func (mt *MultiplexTransport) Close() error {
   235  	close(mt.closec)
   236  
   237  	if mt.listener != nil {
   238  		return mt.listener.Close()
   239  	}
   240  
   241  	return nil
   242  }
   243  
   244  // Listen implements transportLifecycle.
   245  func (mt *MultiplexTransport) Listen(addr NetAddress) error {
   246  	ln, err := net.Listen("tcp", addr.DialString())
   247  	if err != nil {
   248  		return err
   249  	}
   250  
   251  	if mt.maxIncomingConnections > 0 {
   252  		ln = netutil.LimitListener(ln, mt.maxIncomingConnections)
   253  	}
   254  
   255  	mt.netAddr = addr
   256  	mt.listener = ln
   257  
   258  	go mt.acceptPeers()
   259  
   260  	return nil
   261  }
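
        // Editorial note, not part of the original file: netutil.LimitListener caps
        // the number of simultaneously open accepted connections at
        // maxIncomingConnections; further Accept calls block until one of the
        // accepted connections is closed.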
   262  
   263  func (mt *MultiplexTransport) acceptPeers() {
   264  	for {
   265  		c, err := mt.listener.Accept()
   266  		if err != nil {
   267  			// If Close() has been called, silently exit.
   268  			select {
   269  			case _, ok := <-mt.closec:
   270  				if !ok {
   271  					return
   272  				}
   273  			default:
   274  				// Transport is not closed
   275  			}
   276  
   277  			mt.acceptc <- accept{err: err}
   278  			return
   279  		}
   280  
   281  		// Connection upgrade and filtering should be asynchronous to avoid
   282  		// Head-of-line blocking[0].
   283  		// Reference: https://github.com/tendermint/tendermint/issues/2047
   284  		//
   285  		// [0] https://en.wikipedia.org/wiki/Head-of-line_blocking
   286  		go func(c net.Conn) {
   287  			defer func() {
   288  				if r := recover(); r != nil {
   289  					err := ErrRejected{
   290  						conn:          c,
   291  						err:           errors.Errorf("recovered from panic: %v", r),
   292  						isAuthFailure: true,
   293  					}
   294  					select {
   295  					case mt.acceptc <- accept{err: err}:
   296  					case <-mt.closec:
   297  						// Give up if the transport was closed.
   298  						_ = c.Close()
   299  						return
   300  					}
   301  				}
   302  			}()
   303  
   304  			var (
   305  				nodeInfo   NodeInfo
   306  				secretConn *conn.SecretConnection
   307  				netAddr    *NetAddress
   308  			)
   309  
   310  			err := mt.filterConn(c)
   311  			if err == nil {
   312  				secretConn, nodeInfo, err = mt.upgrade(c, nil)
   313  				if err == nil {
   314  					addr := c.RemoteAddr()
   315  					id := PubKeyToID(secretConn.RemotePubKey())
   316  					netAddr = NewNetAddress(id, addr)
   317  				}
   318  			}
   319  
   320  			select {
   321  			case mt.acceptc <- accept{netAddr, secretConn, nodeInfo, err}:
   322  				// Make the upgraded peer available.
   323  			case <-mt.closec:
   324  				// Give up if the transport was closed.
   325  				_ = c.Close()
   326  				return
   327  			}
   328  		}(c)
   329  	}
   330  }
   331  
   332  // Cleanup removes the given address from the connections set and
   333  // closes the connection.
   334  func (mt *MultiplexTransport) Cleanup(p Peer) {
   335  	mt.conns.RemoveAddr(p.RemoteAddr())
   336  	_ = p.CloseConn()
   337  }
   338  
   339  func (mt *MultiplexTransport) cleanup(c net.Conn) error {
   340  	mt.conns.Remove(c)
   341  
   342  	return c.Close()
   343  }
   344  
   345  func (mt *MultiplexTransport) filterConn(c net.Conn) (err error) {
   346  	defer func() {
   347  		if err != nil {
   348  			_ = c.Close()
   349  		}
   350  	}()
   351  
   352  	// Reject if connection is already present.
   353  	if mt.conns.Has(c) {
   354  		return ErrRejected{conn: c, isDuplicate: true}
   355  	}
   356  
   357  	// Resolve ips for incoming conn.
   358  	ips, err := resolveIPs(mt.resolver, c)
   359  	if err != nil {
   360  		return err
   361  	}
   362  
   363  	errc := make(chan error, len(mt.connFilters))
   364  
   365  	for _, f := range mt.connFilters {
   366  		go func(f ConnFilterFunc, c net.Conn, ips []net.IP, errc chan<- error) {
   367  			errc <- f(mt.conns, c, ips)
   368  		}(f, c, ips, errc)
   369  	}
   370  
   371  	for i := 0; i < cap(errc); i++ {
   372  		select {
   373  		case err := <-errc:
   374  			if err != nil {
   375  				return ErrRejected{conn: c, err: err, isFiltered: true}
   376  			}
   377  		case <-time.After(mt.filterTimeout):
   378  			return ErrFilterTimeout{}
   379  		}
   380  
   381  	}
   382  
   383  	mt.conns.Set(c, ips)
   384  
   385  	return nil
   386  }
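
        // Editorial note, not part of the original file: filterConn fans the
        // configured filters out into goroutines and then waits up to filterTimeout
        // for each result from the buffered errc channel. A minimal standalone sketch
        // of that idiom, with filters assumed to be a []func() error:
        //
        //	errc := make(chan error, len(filters))
        //	for _, f := range filters {
        //		go func(f func() error) { errc <- f() }(f)
        //	}
        //	for i := 0; i < cap(errc); i++ {
        //		select {
        //		case err := <-errc:
        //			if err != nil {
        //				return err
        //			}
        //		case <-time.After(timeout):
        //			return ErrFilterTimeout{}
        //		}
        //	}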
   387  
   388  func (mt *MultiplexTransport) upgrade(
   389  	c net.Conn,
   390  	dialedAddr *NetAddress,
   391  ) (secretConn *conn.SecretConnection, nodeInfo NodeInfo, err error) {
   392  	defer func() {
   393  		if err != nil {
   394  			_ = mt.cleanup(c)
   395  		}
   396  	}()
   397  
   398  	secretConn, err = upgradeSecretConn(c, mt.handshakeTimeout, mt.nodeKey.PrivKey)
   399  	if err != nil {
   400  		return nil, nil, ErrRejected{
   401  			conn:          c,
   402  			err:           fmt.Errorf("secret conn failed: %v", err),
   403  			isAuthFailure: true,
   404  		}
   405  	}
   406  
   407  	// For outgoing conns, ensure connection key matches dialed key.
   408  	connID := PubKeyToID(secretConn.RemotePubKey())
   409  	if dialedAddr != nil {
   410  		if dialedID := dialedAddr.ID; connID != dialedID {
   411  			return nil, nil, ErrRejected{
   412  				conn: c,
   413  				id:   connID,
   414  				err: fmt.Errorf(
   415  					"conn.ID (%v) dialed ID (%v) mismatch",
   416  					connID,
   417  					dialedID,
   418  				),
   419  				isAuthFailure: true,
   420  			}
   421  		}
   422  	}
   423  
   424  	nodeInfo, err = handshake(secretConn, mt.handshakeTimeout, mt.nodeInfo)
   425  	if err != nil {
   426  		return nil, nil, ErrRejected{
   427  			conn:          c,
   428  			err:           fmt.Errorf("handshake failed: %v", err),
   429  			isAuthFailure: true,
   430  		}
   431  	}
   432  
   433  	if err := nodeInfo.Validate(); err != nil {
   434  		return nil, nil, ErrRejected{
   435  			conn:              c,
   436  			err:               err,
   437  			isNodeInfoInvalid: true,
   438  		}
   439  	}
   440  
   441  	// Ensure connection key matches self reported key.
   442  	if connID != nodeInfo.ID() {
   443  		return nil, nil, ErrRejected{
   444  			conn: c,
   445  			id:   connID,
   446  			err: fmt.Errorf(
   447  				"conn.ID (%v) NodeInfo.ID (%v) mismatch",
   448  				connID,
   449  				nodeInfo.ID(),
   450  			),
   451  			isAuthFailure: true,
   452  		}
   453  	}
   454  
   455  	// Reject self.
   456  	if mt.nodeInfo.ID() == nodeInfo.ID() {
   457  		return nil, nil, ErrRejected{
   458  			addr:   *NewNetAddress(nodeInfo.ID(), c.RemoteAddr()),
   459  			conn:   c,
   460  			id:     nodeInfo.ID(),
   461  			isSelf: true,
   462  		}
   463  	}
   464  
   465  	if err := mt.nodeInfo.CompatibleWith(nodeInfo); err != nil {
   466  		return nil, nil, ErrRejected{
   467  			conn:           c,
   468  			err:            err,
   469  			id:             nodeInfo.ID(),
   470  			isIncompatible: true,
   471  		}
   472  	}
   473  
   474  	return secretConn, nodeInfo, nil
   475  }
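
        // Editorial summary, not part of the original file: upgrade rejects a
        // connection at the first failing step, in this order: the secret-connection
        // handshake, the dialed-ID check (outbound only), the NodeInfo exchange, the
        // NodeInfo validation, the check that the authenticated key matches the
        // self-reported ID, the self-connection check, and the compatibility check.
        // Every failure is returned as ErrRejected with the corresponding flag set.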
   476  
   477  func (mt *MultiplexTransport) wrapPeer(
   478  	c net.Conn,
   479  	ni NodeInfo,
   480  	cfg peerConfig,
   481  	socketAddr *NetAddress,
   482  ) Peer {
   483  
   484  	persistent := false
   485  	if cfg.isPersistent != nil {
   486  		if cfg.outbound {
   487  			persistent = cfg.isPersistent(socketAddr)
   488  		} else {
   489  			selfReportedAddr, err := ni.NetAddress()
   490  			if err == nil {
   491  				persistent = cfg.isPersistent(selfReportedAddr)
   492  			}
   493  		}
   494  	}
   495  
   496  	peerConn := newPeerConn(
   497  		cfg.outbound,
   498  		persistent,
   499  		c,
   500  		socketAddr,
   501  	)
   502  
   503  	p := newPeer(
   504  		peerConn,
   505  		mt.mConfig,
   506  		ni,
   507  		cfg.reactorsByCh,
   508  		cfg.chDescs,
   509  		cfg.onPeerError,
   510  		PeerMetrics(cfg.metrics),
   511  	)
   512  
   513  	return p
   514  }
   515  
   516  func handshake(
   517  	c net.Conn,
   518  	timeout time.Duration,
   519  	nodeInfo NodeInfo,
   520  ) (NodeInfo, error) {
   521  	if err := c.SetDeadline(time.Now().Add(timeout)); err != nil {
   522  		return nil, err
   523  	}
   524  
   525  	var (
   526  		errc = make(chan error, 2)
   527  
   528  		peerNodeInfo DefaultNodeInfo
   529  		ourNodeInfo  = nodeInfo.(DefaultNodeInfo)
   530  	)
   531  
   532  	go func(errc chan<- error, c net.Conn) {
   533  		_, err := cdc.MarshalBinaryLengthPrefixedWriter(c, ourNodeInfo)
   534  		errc <- err
   535  	}(errc, c)
   536  	go func(errc chan<- error, c net.Conn) {
   537  		_, err := cdc.UnmarshalBinaryLengthPrefixedReader(
   538  			c,
   539  			&peerNodeInfo,
   540  			int64(MaxNodeInfoSize()),
   541  		)
   542  		errc <- err
   543  	}(errc, c)
   544  
   545  	for i := 0; i < cap(errc); i++ {
   546  		err := <-errc
   547  		if err != nil {
   548  			return nil, err
   549  		}
   550  	}
   551  
   552  	return peerNodeInfo, c.SetDeadline(time.Time{})
   553  }
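
        // Editorial note, not part of the original file: handshake sends our NodeInfo
        // and reads the peer's concurrently so neither side blocks the other; both
        // goroutines report into the buffered errc channel and the loop returns on
        // the first error. The connection deadline bounds the whole exchange and is
        // cleared again on success.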
   554  
   555  func upgradeSecretConn(
   556  	c net.Conn,
   557  	timeout time.Duration,
   558  	privKey crypto.PrivKey,
   559  ) (*conn.SecretConnection, error) {
   560  	if err := c.SetDeadline(time.Now().Add(timeout)); err != nil {
   561  		return nil, err
   562  	}
   563  
   564  	sc, err := conn.MakeSecretConnection(c, privKey)
   565  	if err != nil {
   566  		return nil, err
   567  	}
   568  
   569  	return sc, sc.SetDeadline(time.Time{})
   570  }
   571  
   572  func resolveIPs(resolver IPResolver, c net.Conn) ([]net.IP, error) {
   573  	host, _, err := net.SplitHostPort(c.RemoteAddr().String())
   574  	if err != nil {
   575  		return nil, err
   576  	}
   577  
   578  	addrs, err := resolver.LookupIPAddr(context.Background(), host)
   579  	if err != nil {
   580  		return nil, err
   581  	}
   582  
   583  	ips := []net.IP{}
   584  
   585  	for _, addr := range addrs {
   586  		ips = append(ips, addr.IP)
   587  	}
   588  
   589  	return ips, nil
   590  }