github.com/NebulousLabs/Sia@v1.3.7/modules/gateway/peersmanager.go

package gateway

import (
	"github.com/NebulousLabs/Sia/build"
	"github.com/NebulousLabs/Sia/modules"
	"github.com/NebulousLabs/fastrand"
)

// managedPeerManagerConnect is a blocking function which tries to connect to
// the input address as a peer.
func (g *Gateway) managedPeerManagerConnect(addr modules.NetAddress) {
	g.log.Debugf("[PMC] [%v] Attempting connection", addr)
	err := g.managedConnect(addr)
	if err == errPeerExists {
		// This peer is already connected to us. Safety around the
		// outbound peers relates to the fact that we have picked out
		// the outbound peers instead of allowing the attacker to pick
		// out the peers for us. Because we have made the selection, it
		// is okay to set the peer as an outbound peer.
		//
		// The nodelist size check ensures that an attacker can't flood
		// a new node with a bunch of inbound requests. Doing so would
		// result in a nodelist that's entirely full of attacker nodes.
		// There's not much we can do about that anyway, but at least
		// we can hold off making attacker nodes 'outbound' peers until
		// our nodelist has had time to fill up naturally.
		g.mu.Lock()
		p, exists := g.peers[addr]
		if exists {
			// Have to check that the peer still exists because we
			// released the lock; a race condition could mean that the
			// peer was disconnected before this code block was reached.
			p.Inbound = false
			if n, ok := g.nodes[p.NetAddress]; ok && !n.WasOutboundPeer {
				n.WasOutboundPeer = true
				g.nodes[n.NetAddress] = n
			}
			g.log.Debugf("[PMC] [SUCCESS] [%v] existing peer has been converted to outbound peer", addr)
			g.callInitRPCs(p.NetAddress)
		}
		g.mu.Unlock()
	} else if err != nil {
		g.log.Debugf("[PMC] [ERROR] [%v] WARN: removing peer because automatic connect failed: %v\n", addr, err)

		// Remove the node, but only if there are enough nodes in the node list.
		g.mu.Lock()
		if len(g.nodes) > pruneNodeListLen {
			g.removeNode(addr)
		}
		g.mu.Unlock()
	} else {
		g.log.Debugf("[PMC] [SUCCESS] [%v] peer successfully added", addr)
	}
}
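The exists check above matters because the gateway mutex is not held across the connection attempt; between releasing and reacquiring the lock, another goroutine may have disconnected the peer. A minimal standalone sketch of that re-check pattern, using a hypothetical peers map guarded by a sync.Mutex (the names and values here are illustrative, not part of the gateway):

package main

import (
	"fmt"
	"sync"
)

// peer is a stand-in for the gateway's peer record (illustrative only).
type peer struct {
	Inbound bool
}

func main() {
	var mu sync.Mutex
	peers := map[string]*peer{"1.2.3.4:9981": {Inbound: true}}

	// Simulate a concurrent disconnect that runs while the lock is released.
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		mu.Lock()
		delete(peers, "1.2.3.4:9981")
		mu.Unlock()
	}()
	wg.Wait()

	// Reacquire the lock and re-check existence before mutating, mirroring
	// the `p, exists := g.peers[addr]` guard above.
	mu.Lock()
	if p, exists := peers["1.2.3.4:9981"]; exists {
		p.Inbound = false
		fmt.Println("converted to outbound")
	} else {
		fmt.Println("peer disconnected before conversion; nothing to do")
	}
	mu.Unlock()
}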

// numOutboundPeers returns the number of outbound peers in the gateway.
func (g *Gateway) numOutboundPeers() int {
	n := 0
	for _, p := range g.peers {
		if !p.Inbound {
			n++
		}
	}
	return n
}

// permanentPeerManager tries to keep the Gateway well-connected. As long as
// the Gateway is not well-connected, it tries to connect to random nodes.
func (g *Gateway) permanentPeerManager(closedChan chan struct{}) {
	// Send a signal upon shutdown.
	defer close(closedChan)
	defer g.log.Debugln("INFO: [PPM] Permanent peer manager is shutting down")

	// permanentPeerManager will attempt to connect to peers asynchronously,
	// such that multiple connection attempts can be open at once, but a
	// limited number.
	connectionLimiterChan := make(chan struct{}, maxConcurrentOutboundPeerRequests)

	g.log.Debugln("INFO: [PPM] Permanent peer manager has started")

	for {
		// Fetch the set of nodes to try.
		g.mu.RLock()
		nodes := g.buildPeerManagerNodeList()
		g.mu.RUnlock()
		if len(nodes) == 0 {
			g.log.Debugln("[PPM] Node list is empty, sleeping")
			if !g.managedSleep(noNodesDelay) {
				return
			}
			continue
		}

		for _, addr := range nodes {
			// Break as soon as we have enough outbound peers.
			g.mu.RLock()
			numOutboundPeers := g.numOutboundPeers()
			isOutboundPeer := g.peers[addr] != nil && !g.peers[addr].Inbound
			g.mu.RUnlock()
			if numOutboundPeers >= wellConnectedThreshold {
				g.log.Debugln("INFO: [PPM] Gateway has enough peers, sleeping.")
				if !g.managedSleep(wellConnectedDelay) {
					return
				}
				break
			}
			if isOutboundPeer {
				// Skip current outbound peers.
				if !g.managedSleep(acquiringPeersDelay) {
					return
				}
				continue
			}

			g.log.Debugln("[PPM] Fetched a random node:", addr)

			// We need at least some of our outbound peers to be remote peers. If
			// we have already reached a certain threshold of outbound peers and
			// this peer is a local peer, do not consider it for an outbound peer.
			// Sleep briefly to prevent the gateway from hogging the CPU if all
			// peers are local.
			if numOutboundPeers >= maxLocalOutboundPeers && addr.IsLocal() && build.Release != "testing" {
				g.log.Debugln("[PPM] Ignoring selected peer; this peer is local and we already have multiple outbound peers:", addr)
				if !g.managedSleep(unwantedLocalPeerDelay) {
					return
				}
				continue
			}

			// Try connecting to that peer in a goroutine. Do not block unless
			// there are already maxConcurrentOutboundPeerRequests peer connection
			// attempts open at once. Before spawning the thread, make sure that
			// there is enough room by throwing a struct into the buffered channel.
			g.log.Debugln("[PPM] Trying to connect to a node:", addr)
			connectionLimiterChan <- struct{}{}
			go func(addr modules.NetAddress) {
				// After completion, take the struct out of the channel so that the
				// next thread may proceed.
				defer func() {
					<-connectionLimiterChan
				}()

				if err := g.threads.Add(); err != nil {
					return
				}
				defer g.threads.Done()
				// managedPeerManagerConnect will handle all of its own logging.
				g.managedPeerManagerConnect(addr)
			}(addr)

			// Wait a bit before trying the next peer. The peer connections are
			// non-blocking, so they should be spaced out to avoid spinning up an
			// uncontrolled number of threads and therefore peer connections.
			if !g.managedSleep(acquiringPeersDelay) {
				return
			}
		}
	}
}
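connectionLimiterChan works as a counting semaphore: the send blocks once maxConcurrentOutboundPeerRequests connection attempts are in flight, and each goroutine frees a slot when it returns. A minimal standalone sketch of the same pattern, with a placeholder limit and worker body that are not gateway code:

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	const maxConcurrent = 3 // stands in for maxConcurrentOutboundPeerRequests

	limiter := make(chan struct{}, maxConcurrent)
	var wg sync.WaitGroup

	for i := 0; i < 10; i++ {
		// Blocks here once maxConcurrent attempts are already running.
		limiter <- struct{}{}
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			// Free the slot when this attempt finishes.
			defer func() { <-limiter }()

			fmt.Println("attempt", i, "started")
			time.Sleep(100 * time.Millisecond) // placeholder for a connection attempt
		}(i)
	}
	wg.Wait()
}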

// buildPeerManagerNodeList returns the gateway's node list in the order that
// permanentPeerManager should attempt to connect to them.
func (g *Gateway) buildPeerManagerNodeList() []modules.NetAddress {
	// flatten the node map, inserting in random order
	nodes := make([]modules.NetAddress, len(g.nodes))
	perm := fastrand.Perm(len(nodes))
	for _, node := range g.nodes {
		nodes[perm[0]] = node.NetAddress
		perm = perm[1:]
	}

	// swap the outbound nodes to the front of the list
	numOutbound := 0
	for i, node := range nodes {
		if g.nodes[node].WasOutboundPeer {
			nodes[numOutbound], nodes[i] = nodes[i], nodes[numOutbound]
			numOutbound++
		}
	}
	return nodes
}
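The ordering produced above is: shuffle the full node list, then move nodes that have previously been outbound peers to the front, so permanentPeerManager retries known-good outbound nodes first. A standalone sketch of the same two steps over plain strings, using math/rand in place of fastrand (the map contents and names are illustrative only):

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	// Stand-in for the gateway's node map and its WasOutboundPeer flag.
	wasOutbound := map[string]bool{
		"a:9981": false,
		"b:9981": true,
		"c:9981": false,
		"d:9981": true,
	}

	// Flatten the map into a randomly ordered slice, as buildPeerManagerNodeList
	// does with fastrand.Perm.
	nodes := make([]string, 0, len(wasOutbound))
	for addr := range wasOutbound {
		nodes = append(nodes, addr)
	}
	rand.Shuffle(len(nodes), func(i, j int) { nodes[i], nodes[j] = nodes[j], nodes[i] })

	// Swap previously-outbound nodes to the front of the list.
	numOutbound := 0
	for i, addr := range nodes {
		if wasOutbound[addr] {
			nodes[numOutbound], nodes[i] = nodes[i], nodes[numOutbound]
			numOutbound++
		}
	}
	fmt.Println(nodes)
}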