github.com/keltia/go-ipfs@v0.3.8-0.20150909044612-210793031c63/p2p/net/swarm/swarm_listen.go

package swarm

import (
	"fmt"

	mconn "github.com/ipfs/go-ipfs/metrics/conn"
	inet "github.com/ipfs/go-ipfs/p2p/net"
	conn "github.com/ipfs/go-ipfs/p2p/net/conn"
	addrutil "github.com/ipfs/go-ipfs/p2p/net/swarm/addr"
	lgbl "github.com/ipfs/go-ipfs/util/eventlog/loggables"

	ma "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
	manet "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr-net"
	ps "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-peerstream"
	context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"
	multierr "github.com/ipfs/go-ipfs/thirdparty/multierr"
)

// listen opens a listener for each of the given addresses. It fails fast if
// any address is unusable; otherwise, per-address failures are collected into
// a multierr and returned together.
func (s *Swarm) listen(addrs []ma.Multiaddr) error {

	for _, addr := range addrs {
		if !addrutil.AddrUsable(addr, true) {
			return fmt.Errorf("cannot use addr: %s", addr)
		}
	}

	retErr := multierr.New()

	// listen on every address
	for i, addr := range addrs {
		err := s.setupListener(addr)
		if err != nil {
			if retErr.Errors == nil {
				retErr.Errors = make([]error, len(addrs))
			}
			retErr.Errors[i] = err
			log.Debugf("Failed to listen on: %s - %s", addr, err)
		}
	}

	if retErr.Errors != nil {
		return retErr
	}
	return nil
}

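// Illustrative only: a minimal sketch (not part of the original file) of how
// code inside this package might call listen. It assumes an already
// constructed *Swarm; the two addresses are arbitrary examples.
func exampleListenUsage(s *Swarm) error {
	tcp4, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/4001")
	if err != nil {
		return err
	}
	tcp6, err := ma.NewMultiaddr("/ip6/::1/tcp/4001")
	if err != nil {
		return err
	}
	// listen validates every address with addrutil.AddrUsable up front,
	// then tries each one, collecting per-address failures into a multierr.
	return s.listen([]ma.Multiaddr{tcp4, tcp6})
}
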
// setupListener begins listening for new connections on the given multiaddr.
func (s *Swarm) setupListener(maddr ma.Multiaddr) error {

	// TODO rethink how this has to work. (jbenet)
	//
	// resolved, err := resolveUnspecifiedAddresses([]ma.Multiaddr{maddr})
	// if err != nil {
	// 	return err
	// }
	// for _, a := range resolved {
	// 	s.peers.AddAddr(s.local, a)
	// }

	sk := s.peers.PrivKey(s.local)
	if sk == nil {
		// it may be fine for sk to be nil; just log a warning.
		log.Warning("Listener not given PrivateKey, so WILL NOT SECURE conns.")
	}
	log.Debugf("Swarm Listening at %s", maddr)
	list, err := conn.Listen(s.Context(), maddr, s.local, sk)
	if err != nil {
		return err
	}

	list.SetAddrFilters(s.Filters)

	// if the listener can wrap its raw connections, install a bandwidth-
	// metering wrapper so s.bwc accounts for all traffic on this listener.
	if cw, ok := list.(conn.ListenerConnWrapper); ok {
		cw.SetConnWrapper(func(c manet.Conn) manet.Conn {
			return mconn.WrapConn(s.bwc, c)
		})
	}

	// Add the listener to the peerstream swarm. This will begin accepting
	// connections and streams!
	sl, err := s.swarm.AddListener(list)
	if err != nil {
		return err
	}
	log.Debugf("Swarm Listeners at %s", s.ListenAddresses())

	// signal successful listening to our notifiees.
	s.notifyAll(func(n inet.Notifiee) {
		n.Listen((*Network)(s), maddr)
	})

	// go consume peerstream's listen accept errors. Note, these ARE errors.
	// They may be killing the listener, and if we get _any_ we should be
	// fixing this in our conn.Listener (to ignore them or handle them
	// differently).
	go func(ctx context.Context, sl *ps.Listener) {

		// signal to our notifiees that this listener is closing.
		defer s.notifyAll(func(n inet.Notifiee) {
			n.ListenClose((*Network)(s), maddr)
		})

		for {
			select {
			case err, more := <-sl.AcceptErrors():
				if !more {
					return
				}
				log.Warningf("swarm listener accept error: %s", err)
			case <-ctx.Done():
				return
			}
		}
	}(s.Context(), sl)

	return nil
}

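// Illustrative only: a sketch of a notifiee that would observe the Listen and
// ListenClose signals fired above (ListenClose arrives via the defer in the
// accept-error goroutine). This assumes inet.Notifiee is the six-callback
// interface defined in the p2p/net package, and that *Swarm exposes a Notify
// method for registration, e.g. s.Notify(&listenLogger{}).
type listenLogger struct{}

func (l *listenLogger) Listen(n inet.Network, a ma.Multiaddr) {
	log.Debugf("notifiee: started listening on %s", a)
}

func (l *listenLogger) ListenClose(n inet.Network, a ma.Multiaddr) {
	log.Debugf("notifiee: stopped listening on %s", a)
}

func (l *listenLogger) Connected(inet.Network, inet.Conn)      {}
func (l *listenLogger) Disconnected(inet.Network, inet.Conn)   {}
func (l *listenLogger) OpenedStream(inet.Network, inet.Stream) {}
func (l *listenLogger) ClosedStream(inet.Network, inet.Stream) {}
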
// connHandler is called by the StreamSwarm whenever a new connection is
// added. Here we configure it slightly. Note that this runs sequentially, so
// anything that will take a while should be done in a goroutine.
// See https://godoc.org/github.com/jbenet/go-peerstream for more information.
func (s *Swarm) connHandler(c *ps.Conn) *Conn {
	ctx := context.Background()
	// This context is for running the handshake, which -- when receiving
	// connections -- we have no bound on beyond what the transport protocol
	// bounds it at. Note that setup + the handshake are bounded by the
	// underlying io (i.e. if TCP or UDP disconnects, or the swarm closes,
	// we're done).
	// Q: why not use a shorter handshake timeout? Think of an HTTP server on
	// really slow conns: as long as the conn is live (TCP says it's online),
	// it tries its best. We follow suit.

	sc, err := s.newConnSetup(ctx, c)
	if err != nil {
		log.Debug(err)
		log.Event(ctx, "newConnHandlerDisconnect", lgbl.NetConn(c.NetConn()), lgbl.Error(err))
		c.Close() // boom. close it.
		return nil
	}

	return sc
}
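
// Illustrative only: the comment on connHandler notes it runs sequentially,
// so slow per-connection work belongs in a goroutine. A hypothetical wrapper
// (not part of the original file) might look like this.
func (s *Swarm) connHandlerAsync(c *ps.Conn) *Conn {
	sc := s.connHandler(c)
	if sc == nil {
		return nil
	}
	go func() {
		// slow per-connection setup work (metrics, bookkeeping, etc.)
		// would go here, so the StreamSwarm's handler is not blocked.
	}()
	return sc
}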