github.com/prysmaticlabs/prysm@v1.4.4/beacon-chain/sync/initial-sync/blocks_fetcher_peers_test.go

package initialsync

import (
	"context"
	"math"
	"sort"
	"sync"
	"testing"
	"time"

	"github.com/kevinms/leakybucket-go"
	"github.com/libp2p/go-libp2p-core/peer"
	types "github.com/prysmaticlabs/eth2-types"
	"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers/scorers"
	"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
	"github.com/prysmaticlabs/prysm/shared/testutil/assert"
	"github.com/prysmaticlabs/prysm/shared/timeutils"
)

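// TestBlocksFetcher_selectFailOverPeer covers fail-over peer selection: the
// previously used (excluded) peer must never be returned, and errNoPeersAvailable
// is expected when no non-excluded candidate remains.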
func TestBlocksFetcher_selectFailOverPeer(t *testing.T) {
	type args struct {
		excludedPID peer.ID
		peers       []peer.ID
	}
	fetcher := newBlocksFetcher(context.Background(), &blocksFetcherConfig{})
	tests := []struct {
		name    string
		args    args
		want    peer.ID
		wantErr error
	}{
		{
			name: "No peers provided",
			args: args{
				excludedPID: "a",
				peers:       []peer.ID{},
			},
			want:    "",
			wantErr: errNoPeersAvailable,
		},
		{
			name: "Single peer which needs to be excluded",
			args: args{
				excludedPID: "a",
				peers: []peer.ID{
					"a",
				},
			},
			want:    "",
			wantErr: errNoPeersAvailable,
		},
		{
			name: "Single peer available",
			args: args{
				excludedPID: "a",
				peers: []peer.ID{
					"cde",
				},
			},
			want:    "cde",
			wantErr: nil,
		},
		{
			name: "Two peers available, excluded first",
			args: args{
				excludedPID: "a",
				peers: []peer.ID{
					"a", "cde",
				},
			},
			want:    "cde",
			wantErr: nil,
		},
		{
			name: "Two peers available, excluded second",
			args: args{
				excludedPID: "a",
				peers: []peer.ID{
					"cde", "a",
				},
			},
			want:    "cde",
			wantErr: nil,
		},
		{
			name: "Multiple peers available",
			args: args{
				excludedPID: "a",
				peers: []peer.ID{
					"a", "cde", "cde", "cde",
				},
			},
			want:    "cde",
			wantErr: nil,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := fetcher.selectFailOverPeer(tt.args.excludedPID, tt.args.peers)
			if tt.wantErr != nil {
				assert.ErrorContains(t, tt.wantErr.Error(), err)
			} else {
				assert.Equal(t, tt.want, got)
			}
		})
	}
}

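// TestBlocksFetcher_filterPeers exercises weighted peer filtering: peers are
// ranked by their block-provider score (processed-block count) and by the
// remaining rate-limiter capacity (usedCapacity below is the amount already
// charged against each peer), with capacityWeight controlling how much the
// capacity component contributes to the final weight.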
func TestBlocksFetcher_filterPeers(t *testing.T) {
	type weightedPeer struct {
		peer.ID
		usedCapacity int64
	}
	type args struct {
		peers           []weightedPeer
		peersPercentage float64
		capacityWeight  float64
	}

	batchSize := uint64(flags.Get().BlockBatchLimit)
	tests := []struct {
		name   string
		args   args
		update func(s *scorers.BlockProviderScorer)
		want   []peer.ID
	}{
		{
			name: "no peers available",
			args: args{
				peers:           []weightedPeer{},
				peersPercentage: 1.0,
				capacityWeight:  0.2,
			},
			want: []peer.ID{},
		},
		{
			name: "single peer",
			args: args{
				peers: []weightedPeer{
					{"a", 1200},
				},
				peersPercentage: 1.0,
				capacityWeight:  0.2,
			},
			want: []peer.ID{"a"},
		},
		{
			name: "multiple peers same capacity",
			args: args{
				peers: []weightedPeer{
					{"a", 2400},
					{"b", 2400},
					{"c", 2400},
				},
				peersPercentage: 1.0,
				capacityWeight:  0.2,
			},
			want: []peer.ID{"a", "b", "c"},
		},
		{
			name: "multiple peers capacity as tie-breaker",
			args: args{
				peers: []weightedPeer{
					{"a", 6000},
					{"b", 3000},
					{"c", 0},
					{"d", 9000},
					{"e", 6000},
				},
				peersPercentage: 1.0,
				capacityWeight:  0.2,
			},
			update: func(s *scorers.BlockProviderScorer) {
				s.IncrementProcessedBlocks("a", batchSize*2)
				s.IncrementProcessedBlocks("b", batchSize*2)
				s.IncrementProcessedBlocks("c", batchSize*2)
				s.IncrementProcessedBlocks("d", batchSize*2)
				s.IncrementProcessedBlocks("e", batchSize*2)
			},
			want: []peer.ID{"c", "b", "a", "e", "d"},
		},
		{
			name: "multiple peers same capacity different scores",
			args: args{
				peers: []weightedPeer{
					{"a", 9000},
					{"b", 9000},
					{"c", 9000},
					{"d", 9000},
					{"e", 9000},
				},
				peersPercentage: 0.8,
				capacityWeight:  0.2,
			},
			update: func(s *scorers.BlockProviderScorer) {
				s.IncrementProcessedBlocks("e", s.Params().ProcessedBlocksCap)
				s.IncrementProcessedBlocks("b", s.Params().ProcessedBlocksCap/2)
				s.IncrementProcessedBlocks("c", s.Params().ProcessedBlocksCap/4)
				s.IncrementProcessedBlocks("a", s.Params().ProcessedBlocksCap/8)
				s.IncrementProcessedBlocks("d", 0)
			},
			want: []peer.ID{"e", "b", "c", "a"},
		},
		{
			name: "multiple peers different capacities and scores",
			args: args{
				peers: []weightedPeer{
					{"a", 6500},
					{"b", 2500},
					{"c", 1000},
					{"d", 9000},
					{"e", 6500},
				},
				peersPercentage: 0.8,
				capacityWeight:  0.2,
			},
			update: func(s *scorers.BlockProviderScorer) {
				// Make sure that score takes priority over capacity.
				s.IncrementProcessedBlocks("c", batchSize*5)
				s.IncrementProcessedBlocks("b", batchSize*15)
				// Break the tie using capacity as a tie-breaker ("a" and "e" have the same score).
				s.IncrementProcessedBlocks("a", batchSize*3)
				s.IncrementProcessedBlocks("e", batchSize*3)
				// Exclude peer (peers percentage is 80%).
				s.IncrementProcessedBlocks("d", batchSize)
			},
			want: []peer.ID{"b", "c", "a", "e"},
		},
	}
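	// Each case spins up a fresh fetcher against mocked chain/p2p services,
	// pre-charges the rate limiter with every peer's already used capacity,
	// applies the optional score updates, and then filters repeatedly so the
	// resulting ordering can be checked statistically.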
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			mc, p2p, _ := initializeTestServices(t, []types.Slot{}, []*peerData{})
			fetcher := newBlocksFetcher(context.Background(), &blocksFetcherConfig{
				chain:                    mc,
				p2p:                      p2p,
				peerFilterCapacityWeight: tt.args.capacityWeight,
			})
			// Non-leaking bucket, with initial capacity of 10000.
			fetcher.rateLimiter = leakybucket.NewCollector(0.000001, 10000, false)
			peerIDs := make([]peer.ID, 0)
			for _, pid := range tt.args.peers {
				peerIDs = append(peerIDs, pid.ID)
				fetcher.rateLimiter.Add(pid.ID.String(), pid.usedCapacity)
			}
			if tt.update != nil {
				tt.update(fetcher.p2p.Peers().Scorers().BlockProviderScorer())
			}
			// Since peer selection is probabilistic (weighted, with high scorers having a higher
			// chance of being selected), multiple rounds of filtering are needed to test the order:
			// over many attempts, top scorers should be picked for high positions more often.
			peerStats := make(map[peer.ID]int, len(tt.want))
			var filteredPIDs []peer.ID
			for i := 0; i < 1000; i++ {
				filteredPIDs = fetcher.filterPeers(context.Background(), peerIDs, tt.args.peersPercentage)
				if len(filteredPIDs) <= 1 {
					break
				}
				for j, pid := range filteredPIDs {
					// The higher a peer appears in the list, the more "points" it gets.
					peerStats[pid] += len(tt.want) - j
				}
			}

			// If only a percentage of peers was requested, rebuild the combined filtered peers list.
			if len(filteredPIDs) != len(peerStats) && len(peerStats) > 0 {
				filteredPIDs = []peer.ID{}
				for pid := range peerStats {
					filteredPIDs = append(filteredPIDs, pid)
				}
			}

			// Sort by frequency of appearance in high positions on filtering.
			sort.Slice(filteredPIDs, func(i, j int) bool {
				return peerStats[filteredPIDs[i]] > peerStats[filteredPIDs[j]]
			})
			if tt.args.peersPercentage < 1.0 {
				limit := uint64(math.Round(float64(len(filteredPIDs)) * tt.args.peersPercentage))
				filteredPIDs = filteredPIDs[:limit]
			}
			// Deterministically re-arrange peers that have the same remaining capacity.
			// The fetcher deliberately shuffles such peers (so that any of them can be
			// selected when capacities are equal), which is why they are sorted here.
			sort.SliceStable(filteredPIDs, func(i, j int) bool {
				score1 := fetcher.p2p.Peers().Scorers().BlockProviderScorer().Score(filteredPIDs[i])
				score2 := fetcher.p2p.Peers().Scorers().BlockProviderScorer().Score(filteredPIDs[j])
				if score1 == score2 {
					cap1 := fetcher.rateLimiter.Remaining(filteredPIDs[i].String())
					cap2 := fetcher.rateLimiter.Remaining(filteredPIDs[j].String())
					if cap1 == cap2 {
						return filteredPIDs[i].String() < filteredPIDs[j].String()
					}
				}
				return i < j
			})
			assert.DeepEqual(t, tt.want, filteredPIDs)
		})
	}
}

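// TestBlocksFetcher_removeStalePeerLocks verifies that per-peer locks whose
// last access time is older than the supplied age are pruned from the
// fetcher's lock map, while recently accessed locks are kept.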
func TestBlocksFetcher_removeStalePeerLocks(t *testing.T) {
	type peerData struct {
		peerID   peer.ID
		accessed time.Time
	}
	tests := []struct {
		name     string
		age      time.Duration
		peersIn  []peerData
		peersOut []peerData
	}{
		{
			name:     "empty map",
			age:      peerLockMaxAge,
			peersIn:  []peerData{},
			peersOut: []peerData{},
		},
		{
			name: "no stale peer locks",
			age:  peerLockMaxAge,
			peersIn: []peerData{
				{
					peerID:   "a",
					accessed: timeutils.Now(),
				},
				{
					peerID:   "b",
					accessed: timeutils.Now(),
				},
				{
					peerID:   "c",
					accessed: timeutils.Now(),
				},
			},
			peersOut: []peerData{
				{
					peerID:   "a",
					accessed: timeutils.Now(),
				},
				{
					peerID:   "b",
					accessed: timeutils.Now(),
				},
				{
					peerID:   "c",
					accessed: timeutils.Now(),
				},
			},
		},
		{
			name: "one stale peer lock",
			age:  peerLockMaxAge,
			peersIn: []peerData{
				{
					peerID:   "a",
					accessed: timeutils.Now(),
				},
				{
					peerID:   "b",
					accessed: timeutils.Now().Add(-peerLockMaxAge),
				},
				{
					peerID:   "c",
					accessed: timeutils.Now(),
				},
			},
			peersOut: []peerData{
				{
					peerID:   "a",
					accessed: timeutils.Now(),
				},
				{
					peerID:   "c",
					accessed: timeutils.Now(),
				},
			},
		},
		{
			name: "all peer locks are stale",
			age:  peerLockMaxAge,
			peersIn: []peerData{
				{
					peerID:   "a",
					accessed: timeutils.Now().Add(-peerLockMaxAge),
				},
				{
					peerID:   "b",
					accessed: timeutils.Now().Add(-peerLockMaxAge),
				},
				{
					peerID:   "c",
					accessed: timeutils.Now().Add(-peerLockMaxAge),
				},
			},
			peersOut: []peerData{},
		},
	}

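	// A single fetcher is shared across the cases below; every case rebuilds
	// the peerLocks map from scratch before pruning it.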
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{})

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			fetcher.peerLocks = make(map[peer.ID]*peerLock, len(tt.peersIn))
			for _, data := range tt.peersIn {
				fetcher.peerLocks[data.peerID] = &peerLock{
					Mutex:    sync.Mutex{},
					accessed: data.accessed,
				}
			}

			fetcher.removeStalePeerLocks(tt.age)

			var peersOut1, peersOut2 []peer.ID
			for _, data := range tt.peersOut {
				peersOut1 = append(peersOut1, data.peerID)
			}
			for peerID := range fetcher.peerLocks {
				peersOut2 = append(peersOut2, peerID)
			}
			sort.SliceStable(peersOut1, func(i, j int) bool {
				return peersOut1[i].String() < peersOut1[j].String()
			})
			sort.SliceStable(peersOut2, func(i, j int) bool {
				return peersOut2[i].String() < peersOut2[j].String()
			})
			assert.DeepEqual(t, peersOut1, peersOut2, "Unexpected peers map")
		})
	}
}
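
// The helper below is an illustrative sketch only: it is not part of the
// original test suite and is never invoked. It shows how the two behaviours
// exercised above are typically combined by a caller, i.e. narrowing the
// candidate set with weighted filtering and then picking a fail-over peer
// once a request to one candidate has failed. It assumes the package's
// blocksFetcher type and the filterPeers/selectFailOverPeer signatures used
// in the tests above; error handling is intentionally minimal.
func sketchFailOverSelection(fetcher *blocksFetcher, candidates []peer.ID, failed peer.ID) (peer.ID, error) {
	// Keep only the better-weighted portion of the candidates (here: top 80%).
	filtered := fetcher.filterPeers(context.Background(), candidates, 0.8)
	// Ask for any remaining peer other than the one that just failed;
	// errNoPeersAvailable is returned once every candidate is excluded.
	return fetcher.selectFailOverPeer(failed, filtered)
}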