github.com/prysmaticlabs/prysm@v1.4.4/beacon-chain/sync/initial-sync/blocks_fetcher_peers.go

package initialsync

import (
	"context"
	"math"
	"sync"
	"time"

	"github.com/libp2p/go-libp2p-core/peer"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers/scorers"
	"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
	"github.com/prysmaticlabs/prysm/shared/mathutil"
	"github.com/prysmaticlabs/prysm/shared/params"
	"github.com/prysmaticlabs/prysm/shared/timeutils"
	"github.com/sirupsen/logrus"
	"go.opencensus.io/trace"
)

// peerLock returns the peer lock for a given peer. If the lock is not found, it is created.
func (f *blocksFetcher) peerLock(pid peer.ID) *peerLock {
	f.Lock()
	defer f.Unlock()
	if lock, ok := f.peerLocks[pid]; ok && lock != nil {
		lock.accessed = timeutils.Now()
		return lock
	}
	f.peerLocks[pid] = &peerLock{
		Mutex:    sync.Mutex{},
		accessed: timeutils.Now(),
	}
	return f.peerLocks[pid]
}

// removeStalePeerLocks is a cleanup procedure which removes stale locks.
func (f *blocksFetcher) removeStalePeerLocks(age time.Duration) {
	f.Lock()
	defer f.Unlock()
	for peerID, lock := range f.peerLocks {
		if time.Since(lock.accessed) >= age {
			lock.Lock()
			delete(f.peerLocks, peerID)
			lock.Unlock()
		}
	}
}

// selectFailOverPeer randomly selects a failover peer from the list of available peers.
func (f *blocksFetcher) selectFailOverPeer(excludedPID peer.ID, peers []peer.ID) (peer.ID, error) {
	if len(peers) == 0 {
		return "", errNoPeersAvailable
	}
	if len(peers) == 1 && peers[0] == excludedPID {
		return "", errNoPeersAvailable
	}

	ind := f.rand.Int() % len(peers)
	if peers[ind] == excludedPID {
		return f.selectFailOverPeer(excludedPID, append(peers[:ind], peers[ind+1:]...))
	}
	return peers[ind], nil
}

// waitForMinimumPeers spins and waits until enough peers are available.
func (f *blocksFetcher) waitForMinimumPeers(ctx context.Context) ([]peer.ID, error) {
	required := params.BeaconConfig().MaxPeersToSync
	if flags.Get().MinimumSyncPeers < required {
		required = flags.Get().MinimumSyncPeers
	}
	for {
		if ctx.Err() != nil {
			return nil, ctx.Err()
		}
		var peers []peer.ID
		if f.mode == modeStopOnFinalizedEpoch {
			headEpoch := f.chain.FinalizedCheckpt().Epoch
			_, peers = f.p2p.Peers().BestFinalized(params.BeaconConfig().MaxPeersToSync, headEpoch)
		} else {
			headEpoch := helpers.SlotToEpoch(f.chain.HeadSlot())
			_, peers = f.p2p.Peers().BestNonFinalized(flags.Get().MinimumSyncPeers, headEpoch)
		}
		if len(peers) >= required {
			return peers, nil
		}
		log.WithFields(logrus.Fields{
			"suitable": len(peers),
			"required": required}).Info("Waiting for enough suitable peers before syncing")
		time.Sleep(handshakePollingInterval)
	}
}

// filterPeers returns a transformed list of peers, weight-sorted by score and remaining capacity.
// The list can be further constrained using peersPercentage, where only a percentage of peers is returned.
func (f *blocksFetcher) filterPeers(ctx context.Context, peers []peer.ID, peersPercentage float64) []peer.ID {
	ctx, span := trace.StartSpan(ctx, "initialsync.filterPeers")
	defer span.End()

	if len(peers) == 0 {
		return peers
	}

	// Sort peers using both the block provider score and a custom, capacity-based score (see
	// peerFilterCapacityWeight if you want to give different weights to the provider and capacity
	// scores).
	// Scores produced are used as weights, so peers are ordered probabilistically i.e. a peer with
	// a higher score has a higher chance of ending up higher in the list.
	scorer := f.p2p.Peers().Scorers().BlockProviderScorer()
	peers = scorer.WeightSorted(f.rand, peers, func(peerID peer.ID, blockProviderScore float64) float64 {
		remaining, capacity := float64(f.rateLimiter.Remaining(peerID.String())), float64(f.rateLimiter.Capacity())
		// When capacity is close to exhaustion, allow a less performant peer to take a chance.
		// Otherwise, there's a good chance the system will be forced to wait for the rate limiter.
		if remaining < float64(f.blocksPerSecond) {
			return 0.0
		}
		capScore := remaining / capacity
		overallScore := blockProviderScore*(1.0-f.capacityWeight) + capScore*f.capacityWeight
		return math.Round(overallScore*scorers.ScoreRoundingFactor) / scorers.ScoreRoundingFactor
	})

	return trimPeers(peers, peersPercentage)
}

// trimPeers limits the peer list, returning only the specified percentage of peers.
// Takes system constraints into account (min/max peers to sync).
func trimPeers(peers []peer.ID, peersPercentage float64) []peer.ID {
	required := params.BeaconConfig().MaxPeersToSync
	if flags.Get().MinimumSyncPeers < required {
		required = flags.Get().MinimumSyncPeers
	}
	// Weak/slow peers will be pushed down the list and trimmed since only a percentage of peers is selected.
	limit := uint64(math.Round(float64(len(peers)) * peersPercentage))
	// Limit cannot be less than the minimum peers required by the sync mechanism.
	limit = mathutil.Max(limit, uint64(required))
	// Limit cannot be higher than the number of peers available (safe-guard).
	limit = mathutil.Min(limit, uint64(len(peers)))
	return peers[:limit]
}
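
The weighting applied inside filterPeers can be illustrated in isolation. The standalone sketch below is not part of the package; it recomputes the overallScore blend (block provider score mixed with remaining rate-limiter capacity via capacityWeight) with made-up peer numbers and an illustrative rounding factor standing in for scorers.ScoreRoundingFactor and live rate-limiter state.

// Illustrative sketch only; not part of blocks_fetcher_peers.go.
package main

import (
	"fmt"
	"math"
)

// combinedScore mirrors the blend used in filterPeers: the block provider score and a
// capacity-based score (remaining/capacity) are mixed using capacityWeight, then rounded.
// roundingFactor is an assumed stand-in for scorers.ScoreRoundingFactor.
func combinedScore(providerScore, remaining, capacity, blocksPerSecond, capacityWeight float64) float64 {
	const roundingFactor = 10000.0
	// A peer whose remaining capacity cannot cover another batch is scored as 0,
	// so it sinks to the bottom of the weight-sorted list.
	if remaining < blocksPerSecond {
		return 0.0
	}
	capScore := remaining / capacity
	overall := providerScore*(1.0-capacityWeight) + capScore*capacityWeight
	return math.Round(overall*roundingFactor) / roundingFactor
}

func main() {
	// Two hypothetical peers: one with a better provider score but nearly exhausted
	// rate-limiter capacity, one slightly worse but with plenty of capacity left.
	fmt.Println(combinedScore(0.9, 70, 640, 64, 0.2))  // busy peer: ~0.7419
	fmt.Println(combinedScore(0.7, 640, 640, 64, 0.2)) // idle peer: 0.76
}

With a capacity weight of 0.2, the nearly exhausted peer ends up with a slightly lower weight than the idle peer, so WeightSorted is more likely to place the idle peer earlier in the list even though its provider score is lower.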