github.com/keltia/go-ipfs@v0.3.8-0.20150909044612-210793031c63/p2p/net/swarm/swarm_conn.go (about)

     1  package swarm
     2  
     3  import (
     4  	"fmt"
     5  
     6  	ic "github.com/ipfs/go-ipfs/p2p/crypto"
     7  	inet "github.com/ipfs/go-ipfs/p2p/net"
     8  	conn "github.com/ipfs/go-ipfs/p2p/net/conn"
     9  	peer "github.com/ipfs/go-ipfs/p2p/peer"
    10  
    11  	ma "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
    12  	ps "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-peerstream"
    13  	context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"
    14  )
    15  
// Conn is a simple wrapper around a ps.Conn that also exposes
// some of the methods from the underlying conn.Conn.
// There's **five** "layers" to each connection:
//  * 0. the net.Conn - underlying net.Conn (TCP/UDP/UTP/etc)
//  * 1. the manet.Conn - provides multiaddr friendly Conn
//  * 2. the conn.Conn - provides Peer friendly Conn (inc Secure channel)
//  * 3. the peerstream.Conn - provides peerstream / spdystream happiness
//  * 4. the Conn - abstracts everything out, exposing only key parts of underlying layers
// (I know, this is kinda crazy. it's more historical than a good design. though the
// layers do build up pieces of functionality. and they're all just io.RW :) )
type Conn ps.Conn
    27  
// ConnHandler is called when new conns are opened from remote peers.
// See peerstream.ConnHandler.
type ConnHandler func(*Conn)
    31  
    32  func (c *Conn) StreamConn() *ps.Conn {
    33  	return (*ps.Conn)(c)
    34  }
    35  
// RawConn exposes the underlying conn.Conn (layer 2).
func (c *Conn) RawConn() conn.Conn {
	// rightly panic if these things aren't true. it is an expected
	// invariant that these Conns are all of the type we expect:
	// 		ps.Conn wrapping a conn.Conn
	// if we get something else it is programmer error.
	return (*ps.Conn)(c).NetConn().(conn.Conn)
}
    43  
    44  func (c *Conn) String() string {
    45  	return fmt.Sprintf("<SwarmConn %s>", c.RawConn())
    46  }
    47  
    48  // LocalMultiaddr is the Multiaddr on this side
    49  func (c *Conn) LocalMultiaddr() ma.Multiaddr {
    50  	return c.RawConn().LocalMultiaddr()
    51  }
    52  
    53  // LocalPeer is the Peer on our side of the connection
    54  func (c *Conn) LocalPeer() peer.ID {
    55  	return c.RawConn().LocalPeer()
    56  }
    57  
    58  // RemoteMultiaddr is the Multiaddr on the remote side
    59  func (c *Conn) RemoteMultiaddr() ma.Multiaddr {
    60  	return c.RawConn().RemoteMultiaddr()
    61  }
    62  
    63  // RemotePeer is the Peer on the remote side
    64  func (c *Conn) RemotePeer() peer.ID {
    65  	return c.RawConn().RemotePeer()
    66  }
    67  
// LocalPrivateKey is the private key of the peer on this side.
// (The original comment said "public key", but the method returns ic.PrivKey.)
func (c *Conn) LocalPrivateKey() ic.PrivKey {
	return c.RawConn().LocalPrivateKey()
}
    72  
    73  // RemotePublicKey is the public key of the peer on the remote side
    74  func (c *Conn) RemotePublicKey() ic.PubKey {
    75  	return c.RawConn().RemotePublicKey()
    76  }
    77  
    78  // NewSwarmStream returns a new Stream from this connection
    79  func (c *Conn) NewSwarmStream() (*Stream, error) {
    80  	s, err := c.StreamConn().NewStream()
    81  	return wrapStream(s), err
    82  }
    83  
    84  // NewStream returns a new Stream from this connection
    85  func (c *Conn) NewStream() (inet.Stream, error) {
    86  	s, err := c.NewSwarmStream()
    87  	return inet.Stream(s), err
    88  }
    89  
    90  func (c *Conn) Close() error {
    91  	return c.StreamConn().Close()
    92  }
    93  
    94  func wrapConn(psc *ps.Conn) (*Conn, error) {
    95  	// grab the underlying connection.
    96  	if _, ok := psc.NetConn().(conn.Conn); !ok {
    97  		// this should never happen. if we see it ocurring it means that we added
    98  		// a Listener to the ps.Swarm that is NOT one of our net/conn.Listener.
    99  		return nil, fmt.Errorf("swarm connHandler: invalid conn (not a conn.Conn): %s", psc)
   100  	}
   101  	return (*Conn)(psc), nil
   102  }
   103  
   104  // wrapConns returns a *Conn for all these ps.Conns
   105  func wrapConns(conns1 []*ps.Conn) []*Conn {
   106  	conns2 := make([]*Conn, len(conns1))
   107  	for i, c1 := range conns1 {
   108  		if c2, err := wrapConn(c1); err == nil {
   109  			conns2[i] = c2
   110  		}
   111  	}
   112  	return conns2
   113  }
   114  
   115  // newConnSetup does the swarm's "setup" for a connection. returns the underlying
   116  // conn.Conn this method is used by both swarm.Dial and ps.Swarm connHandler
   117  func (s *Swarm) newConnSetup(ctx context.Context, psConn *ps.Conn) (*Conn, error) {
   118  
   119  	// wrap with a Conn
   120  	sc, err := wrapConn(psConn)
   121  	if err != nil {
   122  		return nil, err
   123  	}
   124  
   125  	// if we have a public key, make sure we add it to our peerstore!
   126  	// This is an important detail. Otherwise we must fetch the public
   127  	// key from the DHT or some other system.
   128  	if pk := sc.RemotePublicKey(); pk != nil {
   129  		s.peers.AddPubKey(sc.RemotePeer(), pk)
   130  	}
   131  
   132  	// ok great! we can use it. add it to our group.
   133  
   134  	// set the RemotePeer as a group on the conn. this lets us group
   135  	// connections in the StreamSwarm by peer, and get a streams from
   136  	// any available connection in the group (better multiconn):
   137  	//   swarm.StreamSwarm().NewStreamWithGroup(remotePeer)
   138  	psConn.AddGroup(sc.RemotePeer())
   139  
   140  	return sc, nil
   141  }