github.com/amazechain/amc@v0.1.3/internal/sync/initial-sync/blocks_fetcher_peers.go

package initialsync

import (
	"context"
	"math"
	"sync"
	"time"

	"github.com/amazechain/amc/internal/p2p/peers/scorers"
	"github.com/libp2p/go-libp2p/core/peer"
	"go.opencensus.io/trace"
)

    14  // peerLock returns peer lock for a given peer. If lock is not found, it is created.
    15  func (f *blocksFetcher) peerLock(pid peer.ID) *peerLock {
    16  	f.Lock()
    17  	defer f.Unlock()
    18  	if lock, ok := f.peerLocks[pid]; ok && lock != nil {
    19  		lock.accessed = time.Now()
    20  		return lock
    21  	}
    22  	f.peerLocks[pid] = &peerLock{
    23  		Mutex:    sync.Mutex{},
    24  		accessed: time.Now(),
    25  	}
    26  	return f.peerLocks[pid]
    27  }
    28  
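// A hypothetical call site (illustrative only, not part of this file) would
// serialize per-peer requests like so:
//
//	l := f.peerLock(pid)
//	l.Lock()
//	defer l.Unlock()
//	// ... issue a request to pid ...
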
// removeStalePeerLocks is a cleanup procedure which removes stale locks.
func (f *blocksFetcher) removeStalePeerLocks(age time.Duration) {
	f.Lock()
	defer f.Unlock()
	for peerID, lock := range f.peerLocks {
		if time.Since(lock.accessed) >= age {
			// Acquire the stale lock before deleting it, so that any
			// in-flight holder finishes before the lock is dropped.
			lock.Lock()
			delete(f.peerLocks, peerID)
			lock.Unlock()
		}
	}
}

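// loopCleanup is a minimal sketch (not part of the original file) of how
// removeStalePeerLocks might be driven periodically; the interval and age
// values here are illustrative assumptions, not the fetcher's actual
// configuration.
func (f *blocksFetcher) loopCleanup(ctx context.Context) {
	ticker := time.NewTicker(5 * time.Minute)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			// Drop locks that have not been touched for a while.
			f.removeStalePeerLocks(30 * time.Minute)
		}
	}
}
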
// selectFailOverPeer randomly selects a fail-over peer from the list of available peers.
func (f *blocksFetcher) selectFailOverPeer(excludedPID peer.ID, peers []peer.ID) (peer.ID, error) {
	if len(peers) == 0 {
		return "", errNoPeersAvailable
	}
	if len(peers) == 1 && peers[0] == excludedPID {
		return "", errNoPeersAvailable
	}

	ind := f.rand.Int() % len(peers)
	if peers[ind] == excludedPID {
		// Splice out the excluded peer and retry. Note that append reuses the
		// backing array, so the caller's slice is reordered in place.
		return f.selectFailOverPeer(excludedPID, append(peers[:ind], peers[ind+1:]...))
	}
	return peers[ind], nil
}

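// selectFailOverPeerCopy is a hypothetical, non-mutating variant of the
// selection above (illustrative only, unused by the fetcher): it filters the
// excluded peer into a fresh slice, so the caller's list is never spliced.
func (f *blocksFetcher) selectFailOverPeerCopy(excludedPID peer.ID, peers []peer.ID) (peer.ID, error) {
	filtered := make([]peer.ID, 0, len(peers))
	for _, pid := range peers {
		if pid != excludedPID {
			filtered = append(filtered, pid)
		}
	}
	if len(filtered) == 0 {
		return "", errNoPeersAvailable
	}
	return filtered[f.rand.Int()%len(filtered)], nil
}
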
// waitForMinimumPeers spins and waits until enough suitable peers are available.
func (f *blocksFetcher) waitForMinimumPeers(ctx context.Context) ([]peer.ID, error) {
	required := f.p2p.GetConfig().MinSyncPeers
	for {
		if ctx.Err() != nil {
			return nil, ctx.Err()
		}
		_, peers := f.p2p.Peers().BestPeers(required, f.chain.CurrentBlock().Number64())
		if len(peers) >= required {
			return peers, nil
		}
		log.Info("Waiting for enough suitable peers before syncing (blocksFetcher)", "suitable", len(peers), "required", required)
		time.Sleep(handshakePollingInterval)
	}
}

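// A hypothetical caller can bound the wait via context cancellation, which
// the loop above checks on every iteration:
//
//	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
//	defer cancel()
//	peers, err := f.waitForMinimumPeers(ctx)
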
// filterPeers returns a transformed list of peers, weight-sorted by score and remaining
// capacity. The list can be further constrained using peersPercentage, where only a
// percentage of peers is returned.
func (f *blocksFetcher) filterPeers(ctx context.Context, peers []peer.ID, peersPercentage float64) []peer.ID {
	ctx, span := trace.StartSpan(ctx, "initialsync.filterPeers")
	defer span.End()

	if len(peers) == 0 {
		return peers
	}

	// Sort peers using both the block provider score and a custom, capacity-based score
	// (see peerFilterCapacityWeight to give different weights to the provider and
	// capacity scores).
	// Scores produced are used as weights, so peers are ordered probabilistically, i.e.
	// a peer with a higher score has a higher chance to end up higher in the list.
	scorer := f.p2p.Peers().Scorers().BlockProviderScorer()
	peers = scorer.WeightSorted(f.rand, peers, func(peerID peer.ID, blockProviderScore float64) float64 {
		remaining, capacity := float64(f.rateLimiter.Remaining(peerID.String())), float64(f.rateLimiter.Capacity())
		// When capacity is close to exhaustion, allow a less performant peer to take a
		// chance. Otherwise, the system will likely be forced to wait on the rate limiter.
		if remaining < float64(f.blocksPerPeriod) {
			return 0.0
		}
		capScore := remaining / capacity
		overallScore := blockProviderScore*(1.0-f.capacityWeight) + capScore*f.capacityWeight
		return math.Round(overallScore*scorers.ScoreRoundingFactor) / scorers.ScoreRoundingFactor
	})

	return trimPeers(peers, peersPercentage, f.p2p.GetConfig().MinSyncPeers)
}

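// As a worked illustration of the weight above (all numbers hypothetical):
// with capacityWeight = 0.2, blockProviderScore = 0.8, and 40 tokens
// remaining out of a capacity of 80, capScore = 40/80 = 0.5 and the overall
// weight is 0.8*(1-0.2) + 0.5*0.2 = 0.74.
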
// trimPeers limits the peer list, returning only the specified percentage of peers.
// Takes system constraints into account (min/max peers to sync).
func trimPeers(peers []peer.ID, peersPercentage float64, minSyncPeers int) []peer.ID {
	// todo
	required := minSyncPeers
	// Weak/slow peers will be pushed down the list and trimmed, since only a percentage of peers is selected.
	limit := math.Round(float64(len(peers)) * peersPercentage)
	// Limit cannot be less than the minimum number of peers required by the sync mechanism.
	limit = math.Max(limit, float64(required))
	// Limit cannot be higher than the number of peers available (safe-guard).
	limit = math.Min(limit, float64(len(peers)))

	limit = math.Floor(limit)

	return peers[:uint64(limit)]
}
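
// For example (hypothetical numbers): trimming 100 peers with peersPercentage
// 0.05 and minSyncPeers 3 yields limit = round(100*0.05) = 5, which already
// satisfies both bounds, so the first 5 peers of the weight-sorted list are
// returned.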