github.com/celestiaorg/celestia-node@v0.15.0-beta.1/share/p2p/peers/pool.go

package peers

import (
	"context"
	"sync"
	"time"

	"github.com/libp2p/go-libp2p/core/peer"
)

// defaultCleanupThreshold is the number of non-active entries allowed to
// accumulate in peersList before remove triggers a cleanup pass.
const defaultCleanupThreshold = 2

// pool stores peers and provides methods for simple round-robin access.
type pool struct {
	m           sync.RWMutex
	peersList   []peer.ID          // insertion-ordered list of all known peers
	statuses    map[peer.ID]status // status of every peer in peersList
	cooldown    *timedQueue        // schedules reactivation of cooling-down peers
	activeCount int                // number of peers with status == active
	nextIdx     int                // round-robin pointer into peersList

	hasPeer   bool          // whether the pool currently has any active peer
	hasPeerCh chan struct{} // closed to signal waiters that a peer is available

	cleanupThreshold int
}

type status int

const (
	active status = iota
	cooldown
	removed
)

// newPool returns a new empty pool that keeps peers on cooldown for
// peerCooldownTime before reactivating them.
func newPool(peerCooldownTime time.Duration) *pool {
	p := &pool{
		peersList:        make([]peer.ID, 0),
		statuses:         make(map[peer.ID]status),
		hasPeerCh:        make(chan struct{}),
		cleanupThreshold: defaultCleanupThreshold,
	}
	p.cooldown = newTimedQueue(peerCooldownTime, p.afterCooldown)
	return p
}
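
// Usage sketch (added for illustration, not part of the original file): the
// flow below assumes it runs inside this package; the peer IDs are
// hypothetical placeholders rather than real libp2p multihashes.
func examplePoolLifecycle() {
	p := newPool(time.Second)

	// newly added peers become active and are served round-robin
	p.add("peerA", "peerB")

	if id, ok := p.tryGet(); ok {
		_ = id // first call yields "peerA", the next one "peerB"
	}

	// removed peers are skipped by tryGet until re-added
	p.remove("peerA")
}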

// tryGet returns a peer along with a bool flag indicating whether an active
// peer was found. It advances the round-robin pointer past the returned peer.
func (p *pool) tryGet() (peer.ID, bool) {
	p.m.Lock()
	defer p.m.Unlock()

	if p.activeCount == 0 {
		return "", false
	}

	// if the pointer is out of range, point to the first element
	if p.nextIdx > len(p.peersList)-1 {
		p.nextIdx = 0
	}

	start := p.nextIdx
	for {
		peerID := p.peersList[p.nextIdx]

		p.nextIdx++
		if p.nextIdx == len(p.peersList) {
			p.nextIdx = 0
		}

		if p.statuses[peerID] == active {
			return peerID, true
		}

		// full circle passed, no active peer found
		if p.nextIdx == start {
			return "", false
		}
	}
}
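
// Sketch (illustrative only): tryGet walks peersList from nextIdx and skips
// entries that are on cooldown or removed, so callers only ever see active
// peers. The peer IDs are hypothetical.
func exampleTryGetSkipsInactive() {
	p := newPool(time.Minute)
	p.add("peerA", "peerB", "peerC")
	p.putOnCooldown("peerA")
	p.remove("peerB")

	id, ok := p.tryGet()
	_, _ = id, ok // yields "peerC", true: the only remaining active peer
}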

// next sends a peer on the returned channel as soon as one becomes available.
// The channel is buffered, so the spawned goroutine can always complete its
// send and exit, even if the caller stops listening.
func (p *pool) next(ctx context.Context) <-chan peer.ID {
	peerCh := make(chan peer.ID, 1)
	go func() {
		for {
			if peerID, ok := p.tryGet(); ok {
				peerCh <- peerID
				return
			}

			// snapshot hasPeerCh under the lock: checkHasPeers closes it when
			// a peer appears and swaps it for a new one when the pool drains
			p.m.RLock()
			hasPeerCh := p.hasPeerCh
			p.m.RUnlock()
			select {
			case <-hasPeerCh:
			case <-ctx.Done():
				return
			}
		}
	}()
	return peerCh
}
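
// Sketch (illustrative only): waiting for a peer with a bounded timeout.
// Because next's channel is buffered, the worker goroutine exits cleanly even
// when this caller gives up first.
func exampleWaitForPeer(p *pool) (peer.ID, error) {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	select {
	case id := <-p.next(ctx):
		return id, nil // a peer became available in time
	case <-ctx.Done():
		return "", ctx.Err() // no active peer within the timeout
	}
}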

// add registers peers as active. Unknown peers are appended to peersList,
// while previously removed peers are reactivated in place.
func (p *pool) add(peers ...peer.ID) {
	p.m.Lock()
	defer p.m.Unlock()

	for _, peerID := range peers {
		status, ok := p.statuses[peerID]
		if ok && status != removed {
			continue
		}

		if !ok {
			p.peersList = append(p.peersList, peerID)
		}

		p.statuses[peerID] = active
		p.activeCount++
	}
	p.checkHasPeers()
}

// remove marks peers as removed. Entries stay in peersList until enough
// garbage accumulates for cleanup to compact the list.
func (p *pool) remove(peers ...peer.ID) {
	p.m.Lock()
	defer p.m.Unlock()

	for _, peerID := range peers {
		if status, ok := p.statuses[peerID]; ok && status != removed {
			p.statuses[peerID] = removed
			if status == active {
				p.activeCount--
			}
		}
	}

	// compact the list once it holds too many non-active entries
	if len(p.peersList) >= p.activeCount+p.cleanupThreshold {
		p.cleanup()
	}
	p.checkHasPeers()
}
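
// Sketch (illustrative only): with defaultCleanupThreshold = 2, the second
// removal makes len(peersList) >= activeCount+cleanupThreshold, so remove
// compacts the internal list. The peer IDs are hypothetical.
func exampleRemoveTriggersCleanup() {
	p := newPool(time.Minute)
	p.add("peerA", "peerB", "peerC") // len(peersList) == 3, activeCount == 3

	p.remove("peerA") // 3 < 2+2: the removed entry is kept as garbage
	p.remove("peerB") // 3 >= 1+2: cleanup shrinks peersList to ["peerC"]
}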

// has reports whether peerID is known to the pool and not removed.
func (p *pool) has(peerID peer.ID) bool {
	p.m.RLock()
	defer p.m.RUnlock()

	status, ok := p.statuses[peerID]
	return ok && status != removed
}

// peers returns all peers that have not been removed, including peers
// currently on cooldown.
func (p *pool) peers() []peer.ID {
	p.m.RLock()
	defer p.m.RUnlock()

	peers := make([]peer.ID, 0, len(p.peersList))
	for peerID, status := range p.statuses {
		if status != removed {
			peers = append(peers, peerID)
		}
	}
	return peers
}

// cleanup reduces the memory footprint of the pool by dropping removed peers
// from both peersList and statuses. The caller must hold p.m. nextIdx may end
// up out of range afterwards; tryGet resets it on the next call.
func (p *pool) cleanup() {
	newList := make([]peer.ID, 0, p.activeCount)
	for _, peerID := range p.peersList {
		status := p.statuses[peerID]
		switch status {
		case active, cooldown:
			newList = append(newList, peerID)
		case removed:
			delete(p.statuses, peerID)
		}
	}
	p.peersList = newList
}

// putOnCooldown temporarily deactivates an active peer. The cooldown queue
// reactivates it via afterCooldown once the configured interval elapses.
func (p *pool) putOnCooldown(peerID peer.ID) {
	p.m.Lock()
	defer p.m.Unlock()

	if status, ok := p.statuses[peerID]; ok && status == active {
		p.cooldown.push(peerID)

		p.statuses[peerID] = cooldown
		p.activeCount--
		p.checkHasPeers()
	}
}
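
// Sketch (illustrative only): a peer on cooldown disappears from tryGet and
// comes back automatically. The exact reactivation timing depends on the
// timedQueue created in newPool, which invokes afterCooldown for the peer
// once the cooldown interval elapses.
func exampleCooldownRoundTrip() {
	p := newPool(50 * time.Millisecond)
	p.add("peerA")
	p.putOnCooldown("peerA")

	_, ok := p.tryGet() // ok == false: the only peer is cooling down
	_ = ok

	time.Sleep(100 * time.Millisecond) // wait out the cooldown

	_, ok = p.tryGet() // ok == true once afterCooldown has run
	_ = ok
}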

func (p *pool) afterCooldown(peerID peer.ID) {
	p.m.Lock()
	defer p.m.Unlock()

	// the peer could have been removed by the time afterCooldown is called
	if status, ok := p.statuses[peerID]; !ok || status != cooldown {
		return
	}

	p.statuses[peerID] = active
	p.activeCount++
	p.checkHasPeers()
}

// checkHasPeers keeps the availability signal in sync with activeCount. When
// the first active peer appears, it closes hasPeerCh to wake every waiter at
// once; when the pool drains, it installs a fresh channel for future waiters.
// The caller must hold p.m.
func (p *pool) checkHasPeers() {
	if p.activeCount > 0 && !p.hasPeer {
		p.hasPeer = true
		close(p.hasPeerCh)
		return
	}

	if p.activeCount == 0 && p.hasPeer {
		p.hasPeerCh = make(chan struct{})
		p.hasPeer = false
	}
}
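
// Sketch (illustrative only): the close-to-broadcast idiom behind hasPeerCh.
// Closing a channel releases every goroutine blocked receiving from it, which
// is how a single checkHasPeers call wakes all concurrent next waiters.
func exampleCloseBroadcast() {
	var wg sync.WaitGroup
	ready := make(chan struct{})

	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			<-ready // blocks until ready is closed
		}()
	}

	close(ready) // one close wakes all three waiters
	wg.Wait()
}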

// len returns the number of active peers, excluding peers that are removed or
// on cooldown.
func (p *pool) len() int {
	p.m.RLock()
	defer p.m.RUnlock()
	return p.activeCount
}