// github.com/prysmaticlabs/prysm@v1.4.4/beacon-chain/p2p/peers/status_test.go

// Tests for the peers.Status registry: explicit/implicit peer addition,
// ENR handling, chain-state tracking, bad-response scoring, connection-state
// bookkeeping, pruning, and best-peer selection.
package peers_test

import (
	"context"
	"crypto/rand"
	"strconv"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/p2p/enr"
	"github.com/libp2p/go-libp2p-core/network"
	"github.com/libp2p/go-libp2p-core/peer"
	ma "github.com/multiformats/go-multiaddr"
	types "github.com/prysmaticlabs/eth2-types"
	"github.com/prysmaticlabs/go-bitfield"
	"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers"
	"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers/peerdata"
	"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers/scorers"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
	ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1"
	"github.com/prysmaticlabs/prysm/shared/interfaces"
	"github.com/prysmaticlabs/prysm/shared/params"
	"github.com/prysmaticlabs/prysm/shared/testutil/assert"
	"github.com/prysmaticlabs/prysm/shared/testutil/require"
)

// TestStatus verifies that NewStatus constructs a registry and propagates the
// bad-responses threshold into the scorer configuration.
func TestStatus(t *testing.T) {
	maxBadResponses := 2
	p := peers.NewStatus(context.Background(), &peers.StatusConfig{
		PeerLimit: 30,
		ScorerParams: &scorers.Config{
			BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
				Threshold: maxBadResponses,
			},
		},
	})
	require.NotNil(t, p, "p not created")
	assert.Equal(t, maxBadResponses, p.Scorers().BadResponsesScorer().Params().Threshold, "maxBadResponses incorrect value")
}

// TestPeerExplicitAdd verifies that Add records a peer's address and
// direction, and that a second Add for the same ID overwrites both.
func TestPeerExplicitAdd(t *testing.T) {
	maxBadResponses := 2
	p := peers.NewStatus(context.Background(), &peers.StatusConfig{
		PeerLimit: 30,
		ScorerParams: &scorers.Config{
			BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
				Threshold: maxBadResponses,
			},
		},
	})

	id, err := peer.Decode("16Uiu2HAkyWZ4Ni1TpvDS8dPxsozmHY85KaiFjodQuV6Tz5tkHVeR")
	require.NoError(t, err, "Failed to create ID")
	address, err := ma.NewMultiaddr("/ip4/213.202.254.180/tcp/13000")
	require.NoError(t, err, "Failed to create address")
	direction := network.DirInbound
	p.Add(new(enr.Record), id, address, direction)

	resAddress, err := p.Address(id)
	require.NoError(t, err)
	assert.Equal(t, address, resAddress, "Unexpected address")

	resDirection, err := p.Direction(id)
	require.NoError(t, err)
	assert.Equal(t, direction, resDirection, "Unexpected direction")

	// Update with another explicit add
	address2, err := ma.NewMultiaddr("/ip4/52.23.23.253/tcp/30000/ipfs/QmfAgkmjiZNZhr2wFN9TwaRgHouMTBT6HELyzE5A3BT2wK/p2p-circuit")
	require.NoError(t, err)
	direction2 := network.DirOutbound
	p.Add(new(enr.Record), id, address2, direction2)

	resAddress2, err := p.Address(id)
	require.NoError(t, err)
	assert.Equal(t, address2, resAddress2, "Unexpected address")

	resDirection2, err := p.Direction(id)
	require.NoError(t, err)
	assert.Equal(t, direction2, resDirection2, "Unexpected direction")
}

// TestPeerNoENR verifies that adding a peer with a nil ENR stores a nil
// record rather than failing.
func TestPeerNoENR(t *testing.T) {
	maxBadResponses := 2
	p := peers.NewStatus(context.Background(), &peers.StatusConfig{
		PeerLimit: 30,
		ScorerParams: &scorers.Config{
			BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
				Threshold: maxBadResponses,
			},
		},
	})

	id, err := peer.Decode("16Uiu2HAkyWZ4Ni1TpvDS8dPxsozmHY85KaiFjodQuV6Tz5tkHVeR")
	require.NoError(t, err, "Failed to create ID")
	address, err := ma.NewMultiaddr("/ip4/213.202.254.180/tcp/13000")
	require.NoError(t, err, "Failed to create address")
	direction := network.DirInbound
	p.Add(nil, id, address, direction)

	retrievedENR, err := p.ENR(id)
	require.NoError(t, err, "Could not retrieve chainstate")
	var nilENR *enr.Record
	assert.Equal(t, nilENR, retrievedENR, "Wanted a nil enr to be saved")
}

// TestPeerNoOverwriteENR verifies that a later Add with a nil ENR does not
// clobber a previously stored non-nil record.
func TestPeerNoOverwriteENR(t *testing.T) {
	maxBadResponses := 2
	p := peers.NewStatus(context.Background(), &peers.StatusConfig{
		PeerLimit: 30,
		ScorerParams: &scorers.Config{
			BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
				Threshold: maxBadResponses,
			},
		},
	})

	id, err := peer.Decode("16Uiu2HAkyWZ4Ni1TpvDS8dPxsozmHY85KaiFjodQuV6Tz5tkHVeR")
	require.NoError(t, err, "Failed to create ID")
	address, err := ma.NewMultiaddr("/ip4/213.202.254.180/tcp/13000")
	require.NoError(t, err, "Failed to create address")
	direction := network.DirInbound
	record := new(enr.Record)
	record.Set(enr.WithEntry("test", []byte{'a'}))
	p.Add(record, id, address, direction)
	// try to overwrite
	p.Add(nil, id, address, direction)

	retrievedENR, err := p.ENR(id)
	require.NoError(t, err, "Could not retrieve chainstate")
	require.NotNil(t, retrievedENR, "Wanted a non-nil enr")
}

// TestErrUnknownPeer verifies that every accessor returns
// peerdata.ErrPeerUnknown for an ID that was never added.
func TestErrUnknownPeer(t *testing.T) {
	maxBadResponses := 2
	p := peers.NewStatus(context.Background(), &peers.StatusConfig{
		PeerLimit: 30,
		ScorerParams: &scorers.Config{
			BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
				Threshold: maxBadResponses,
			},
		},
	})

	id, err := peer.Decode("16Uiu2HAkyWZ4Ni1TpvDS8dPxsozmHY85KaiFjodQuV6Tz5tkHVeR")
	require.NoError(t, err)

	_, err = p.Address(id)
	assert.ErrorContains(t, peerdata.ErrPeerUnknown.Error(), err)

	_, err = p.Direction(id)
	assert.ErrorContains(t, peerdata.ErrPeerUnknown.Error(), err)

	_, err = p.ChainState(id)
	assert.ErrorContains(t, peerdata.ErrPeerUnknown.Error(), err)

	_, err = p.ConnectionState(id)
	assert.ErrorContains(t, peerdata.ErrPeerUnknown.Error(), err)

	_, err = p.ChainStateLastUpdated(id)
	assert.ErrorContains(t, peerdata.ErrPeerUnknown.Error(), err)

	_, err = p.Scorers().BadResponsesScorer().Count(id)
	assert.ErrorContains(t, peerdata.ErrPeerUnknown.Error(), err)
}

// TestPeerCommitteeIndices verifies that CommitteeIndices reports exactly the
// attnet bits set in the peer's metadata bitvector.
func TestPeerCommitteeIndices(t *testing.T) {
	maxBadResponses := 2
	p := peers.NewStatus(context.Background(), &peers.StatusConfig{
		PeerLimit: 30,
		ScorerParams: &scorers.Config{
			BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
				Threshold: maxBadResponses,
			},
		},
	})

	id, err := peer.Decode("16Uiu2HAkyWZ4Ni1TpvDS8dPxsozmHY85KaiFjodQuV6Tz5tkHVeR")
	require.NoError(t, err, "Failed to create ID")
	address, err := ma.NewMultiaddr("/ip4/213.202.254.180/tcp/13000")
	require.NoError(t, err, "Failed to create address")
	direction := network.DirInbound
	record := new(enr.Record)
	record.Set(enr.WithEntry("test", []byte{'a'}))
	p.Add(record, id, address, direction)
	// Only subnets 2, 8 and 9 are marked as subscribed.
	bitV := bitfield.NewBitvector64()
	for i := 0; i < 64; i++ {
		if i == 2 || i == 8 || i == 9 {
			bitV.SetBitAt(uint64(i), true)
		}
	}
	p.SetMetadata(id, interfaces.WrappedMetadataV0(&pb.MetaDataV0{
		SeqNumber: 2,
		Attnets:   bitV,
	}))

	wantedIndices := []uint64{2, 8, 9}

	indices, err := p.CommitteeIndices(id)
	require.NoError(t, err, "Could not retrieve committee indices")
	assert.DeepEqual(t, wantedIndices, indices)
}

// TestPeerSubscribedToSubnet verifies that SubscribedToSubnet returns only
// the (connected) peer whose metadata has the queried subnet bit set.
func TestPeerSubscribedToSubnet(t *testing.T) {
	maxBadResponses := 2
	p := peers.NewStatus(context.Background(), &peers.StatusConfig{
		PeerLimit: 30,
		ScorerParams: &scorers.Config{
			BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
				Threshold: maxBadResponses,
			},
		},
	})

	// Add some peers with different states
	numPeers := 2
	for i := 0; i < numPeers; i++ {
		addPeer(t, p, peers.PeerConnected)
	}
	expectedPeer := p.All()[1]
	bitV := bitfield.NewBitvector64()
	for i := 0; i < 64; i++ {
		if i == 2 || i == 8 || i == 9 {
			bitV.SetBitAt(uint64(i), true)
		}
	}
	p.SetMetadata(expectedPeer, interfaces.WrappedMetadataV0(&pb.MetaDataV0{
		SeqNumber: 2,
		Attnets:   bitV,
	}))
	numPeers = 3
	for i := 0; i < numPeers; i++ {
		addPeer(t, p, peers.PeerDisconnected)
	}
	ps := p.SubscribedToSubnet(2)
	assert.Equal(t, 1, len(ps), "Unexpected num of peers")
	assert.Equal(t, expectedPeer, ps[0])

	ps = p.SubscribedToSubnet(8)
	assert.Equal(t, 1, len(ps), "Unexpected num of peers")
	assert.Equal(t, expectedPeer, ps[0])

	ps = p.SubscribedToSubnet(9)
	assert.Equal(t, 1, len(ps), "Unexpected num of peers")
	assert.Equal(t, expectedPeer, ps[0])
}

// TestPeerImplicitAdd verifies that setting connection state on an unknown ID
// implicitly registers the peer.
func TestPeerImplicitAdd(t *testing.T) {
	maxBadResponses := 2
	p := peers.NewStatus(context.Background(), &peers.StatusConfig{
		PeerLimit: 30,
		ScorerParams: &scorers.Config{
			BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
				Threshold: maxBadResponses,
			},
		},
	})

	id, err := peer.Decode("16Uiu2HAkyWZ4Ni1TpvDS8dPxsozmHY85KaiFjodQuV6Tz5tkHVeR")
	require.NoError(t, err)

	connectionState := peers.PeerConnecting
	p.SetConnectionState(id, connectionState)

	resConnectionState, err := p.ConnectionState(id)
	require.NoError(t, err)

	assert.Equal(t, connectionState, resConnectionState, "Unexpected connection state")
}

// TestPeerChainState verifies that SetChainState stores the status and
// advances the ChainStateLastUpdated timestamp.
func TestPeerChainState(t *testing.T) {
	maxBadResponses := 2
	p := peers.NewStatus(context.Background(), &peers.StatusConfig{
		PeerLimit: 30,
		ScorerParams: &scorers.Config{
			BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
				Threshold: maxBadResponses,
			},
		},
	})

	id, err := peer.Decode("16Uiu2HAkyWZ4Ni1TpvDS8dPxsozmHY85KaiFjodQuV6Tz5tkHVeR")
	require.NoError(t, err)
	address, err := ma.NewMultiaddr("/ip4/213.202.254.180/tcp/13000")
	require.NoError(t, err, "Failed to create address")
	direction := network.DirInbound
	p.Add(new(enr.Record), id, address, direction)

	oldChainStartLastUpdated, err := p.ChainStateLastUpdated(id)
	require.NoError(t, err)

	finalizedEpoch := types.Epoch(123)
	p.SetChainState(id, &pb.Status{FinalizedEpoch: finalizedEpoch})

	resChainState, err := p.ChainState(id)
	require.NoError(t, err)
	assert.Equal(t, finalizedEpoch, resChainState.FinalizedEpoch, "Unexpected finalized epoch")

	newChainStartLastUpdated, err := p.ChainStateLastUpdated(id)
	require.NoError(t, err)
	if !newChainStartLastUpdated.After(oldChainStartLastUpdated) {
		t.Errorf("Last updated did not increase: old %v new %v", oldChainStartLastUpdated, newChainStartLastUpdated)
	}
}

// TestPeerWithNilChainState verifies that a nil chain state is surfaced as
// peers.ErrNoPeerStatus and a nil status on retrieval.
func TestPeerWithNilChainState(t *testing.T) {
	maxBadResponses := 2
	p := peers.NewStatus(context.Background(), &peers.StatusConfig{
		PeerLimit: 30,
		ScorerParams: &scorers.Config{
			BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
				Threshold: maxBadResponses,
			},
		},
	})

	id, err := peer.Decode("16Uiu2HAkyWZ4Ni1TpvDS8dPxsozmHY85KaiFjodQuV6Tz5tkHVeR")
	require.NoError(t, err)
	address, err := ma.NewMultiaddr("/ip4/213.202.254.180/tcp/13000")
	require.NoError(t, err, "Failed to create address")
	direction := network.DirInbound
	p.Add(new(enr.Record), id, address, direction)

	p.SetChainState(id, nil)

	resChainState, err := p.ChainState(id)
	require.Equal(t, peers.ErrNoPeerStatus, err)
	var nothing *pb.Status
	require.Equal(t, resChainState, nothing)
}

// TestPeerBadResponses verifies that a peer is flagged bad once its
// bad-response count reaches the configured threshold (2), and stays bad.
func TestPeerBadResponses(t *testing.T) {
	maxBadResponses := 2
	p := peers.NewStatus(context.Background(), &peers.StatusConfig{
		PeerLimit: 30,
		ScorerParams: &scorers.Config{
			BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
				Threshold: maxBadResponses,
			},
		},
	})

	id, err := peer.Decode("16Uiu2HAkyWZ4Ni1TpvDS8dPxsozmHY85KaiFjodQuV6Tz5tkHVeR")
	require.NoError(t, err)
	{
		_, err := id.MarshalBinary()
		require.NoError(t, err)
	}

	assert.Equal(t, false, p.IsBad(id), "Peer marked as bad when should be good")

	address, err := ma.NewMultiaddr("/ip4/213.202.254.180/tcp/13000")
	require.NoError(t, err, "Failed to create address")
	direction := network.DirInbound
	p.Add(new(enr.Record), id, address, direction)

	scorer := p.Scorers().BadResponsesScorer()
	resBadResponses, err := scorer.Count(id)
	require.NoError(t, err)
	assert.Equal(t, 0, resBadResponses, "Unexpected bad responses")
	assert.Equal(t, false, p.IsBad(id), "Peer marked as bad when should be good")

	scorer.Increment(id)
	resBadResponses, err = scorer.Count(id)
	require.NoError(t, err)
	assert.Equal(t, 1, resBadResponses, "Unexpected bad responses")
	assert.Equal(t, false, p.IsBad(id), "Peer marked as bad when should be good")

	scorer.Increment(id)
	resBadResponses, err = scorer.Count(id)
	require.NoError(t, err)
	assert.Equal(t, 2, resBadResponses, "Unexpected bad responses")
	assert.Equal(t, true, p.IsBad(id), "Peer not marked as bad when it should be")

	scorer.Increment(id)
	resBadResponses, err = scorer.Count(id)
	require.NoError(t, err)
	assert.Equal(t, 3, resBadResponses, "Unexpected bad responses")
	assert.Equal(t, true, p.IsBad(id), "Peer not marked as bad when it should be")
}

// TestAddMetaData verifies that SetMetadata stores metadata retrievable via
// Metadata, preserving the sequence number.
func TestAddMetaData(t *testing.T) {
	maxBadResponses := 2
	p := peers.NewStatus(context.Background(), &peers.StatusConfig{
		PeerLimit: 30,
		ScorerParams: &scorers.Config{
			BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
				Threshold: maxBadResponses,
			},
		},
	})

	// Add some peers with different states
	numPeers := 5
	for i := 0; i < numPeers; i++ {
		addPeer(t, p, peers.PeerConnected)
	}
	newPeer := p.All()[2]

	newMetaData := &pb.MetaDataV0{
		SeqNumber: 8,
		Attnets:   bitfield.NewBitvector64(),
	}
	p.SetMetadata(newPeer, interfaces.WrappedMetadataV0(newMetaData))

	md, err := p.Metadata(newPeer)
	require.NoError(t, err)
	assert.Equal(t, newMetaData.SeqNumber, md.SequenceNumber(), "Unexpected sequence number")
}

// TestPeerConnectionStatuses verifies the per-state accessors
// (Disconnected/Connecting/Connected/Disconnecting) and the derived
// Active/Inactive/All counts.
func TestPeerConnectionStatuses(t *testing.T) {
	maxBadResponses := 2
	p := peers.NewStatus(context.Background(), &peers.StatusConfig{
		PeerLimit: 30,
		ScorerParams: &scorers.Config{
			BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
				Threshold: maxBadResponses,
			},
		},
	})

	// Add some peers with different states
	numPeersDisconnected := 11
	for i := 0; i < numPeersDisconnected; i++ {
		addPeer(t, p, peers.PeerDisconnected)
	}
	numPeersConnecting := 7
	for i := 0; i < numPeersConnecting; i++ {
		addPeer(t, p, peers.PeerConnecting)
	}
	numPeersConnected := 43
	for i := 0; i < numPeersConnected; i++ {
		addPeer(t, p, peers.PeerConnected)
	}
	numPeersDisconnecting := 4
	for i := 0; i < numPeersDisconnecting; i++ {
		addPeer(t, p, peers.PeerDisconnecting)
	}

	// Now confirm the states
	assert.Equal(t, numPeersDisconnected, len(p.Disconnected()), "Unexpected number of disconnected peers")
	assert.Equal(t, numPeersConnecting, len(p.Connecting()), "Unexpected number of connecting peers")
	assert.Equal(t, numPeersConnected, len(p.Connected()), "Unexpected number of connected peers")
	assert.Equal(t, numPeersDisconnecting, len(p.Disconnecting()), "Unexpected number of disconnecting peers")
	numPeersActive := numPeersConnecting + numPeersConnected
	assert.Equal(t, numPeersActive, len(p.Active()), "Unexpected number of active peers")
	numPeersInactive := numPeersDisconnecting + numPeersDisconnected
	assert.Equal(t, numPeersInactive, len(p.Inactive()), "Unexpected number of inactive peers")
	numPeersAll := numPeersActive + numPeersInactive
	assert.Equal(t, numPeersAll, len(p.All()), "Unexpected number of peers")
}

// TestPeerValidTime verifies IsReadyToDial/NextValidTime: a past next-valid
// time (or no recorded time, which is the zero time) means ready; a future
// time means not ready.
func TestPeerValidTime(t *testing.T) {
	maxBadResponses := 2
	p := peers.NewStatus(context.Background(), &peers.StatusConfig{
		PeerLimit: 30,
		ScorerParams: &scorers.Config{
			BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
				Threshold: maxBadResponses,
			},
		},
	})

	numPeersConnected := 6
	for i := 0; i < numPeersConnected; i++ {
		addPeer(t, p, peers.PeerConnected)
	}

	allPeers := p.All()

	// Add for 1st peer
	p.SetNextValidTime(allPeers[0], time.Now().Add(-1*time.Second))
	p.SetNextValidTime(allPeers[1], time.Now().Add(1*time.Second))
	p.SetNextValidTime(allPeers[2], time.Now().Add(10*time.Second))

	assert.Equal(t, true, p.IsReadyToDial(allPeers[0]))
	assert.Equal(t, false, p.IsReadyToDial(allPeers[1]))
	assert.Equal(t, false, p.IsReadyToDial(allPeers[2]))

	nextVal, err := p.NextValidTime(allPeers[3])
	require.NoError(t, err)
	assert.Equal(t, true, nextVal.IsZero())
	assert.Equal(t, true, p.IsReadyToDial(allPeers[3]))

	nextVal, err = p.NextValidTime(allPeers[4])
	require.NoError(t, err)
	assert.Equal(t, true, nextVal.IsZero())
	assert.Equal(t, true, p.IsReadyToDial(allPeers[4]))

	nextVal, err = p.NextValidTime(allPeers[5])
	require.NoError(t, err)
	assert.Equal(t, true, nextVal.IsZero())
	assert.Equal(t, true, p.IsReadyToDial(allPeers[5]))

	// Now confirm the states
	assert.Equal(t, numPeersConnected, len(p.Connected()), "Unexpected number of connected peers")
}

// TestPrune verifies that Prune removes low-score disconnected peers while
// retaining peers whose bad-response count marks them as bad.
func TestPrune(t *testing.T) {
	maxBadResponses := 2
	p := peers.NewStatus(context.Background(), &peers.StatusConfig{
		PeerLimit: 30,
		ScorerParams: &scorers.Config{
			BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
				Threshold: maxBadResponses,
			},
		},
	})

	for i := 0; i < p.MaxPeerLimit()+100; i++ {
		if i%7 == 0 {
			// Peer added as disconnected.
			_ = addPeer(t, p, peers.PeerDisconnected)
		}
		// Peer added to peer handler.
		_ = addPeer(t, p, peers.PeerConnected)
	}

	disPeers := p.Disconnected()
	firstPID := disPeers[0]
	secondPID := disPeers[1]
	thirdPID := disPeers[2]

	scorer := p.Scorers().BadResponsesScorer()

	// Make first peer a bad peer
	scorer.Increment(firstPID)
	scorer.Increment(firstPID)

	// Add bad response for p2.
	scorer.Increment(secondPID)

	// Prune peers
	p.Prune()

	// Bad peer is expected to still be kept in handler.
	badRes, err := scorer.Count(firstPID)
	assert.NoError(t, err, "error is supposed to be nil")
	assert.Equal(t, 2, badRes, "Did not get expected amount")

	// Not so good peer is pruned away so that we can reduce the
	// total size of the handler.
	_, err = scorer.Count(secondPID)
	assert.ErrorContains(t, "peer unknown", err)

	// Last peer has been removed.
	_, err = scorer.Count(thirdPID)
	assert.ErrorContains(t, "peer unknown", err)
}

// TestPeerIPTracker verifies that peers colocated on one IP beyond
// ColocationLimit are treated as bad, and that pruning their records clears
// the bad-IP verdict.
func TestPeerIPTracker(t *testing.T) {
	maxBadResponses := 2
	p := peers.NewStatus(context.Background(), &peers.StatusConfig{
		PeerLimit: 30,
		ScorerParams: &scorers.Config{
			BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
				Threshold: maxBadResponses,
			},
		},
	})

	badIP := "211.227.218.116"
	var badPeers []peer.ID
	for i := 0; i < peers.ColocationLimit+10; i++ {
		port := strconv.Itoa(3000 + i)
		addr, err := ma.NewMultiaddr("/ip4/" + badIP + "/tcp/" + port)
		if err != nil {
			t.Fatal(err)
		}
		badPeers = append(badPeers, createPeer(t, p, addr, network.DirUnknown, peerdata.PeerConnectionState(ethpb.ConnectionState_DISCONNECTED)))
	}
	for _, pr := range badPeers {
		assert.Equal(t, true, p.IsBad(pr), "peer with bad ip is not bad")
	}

	// Add in bad peers, so that our records are trimmed out
	// from the peer store.
	for i := 0; i < p.MaxPeerLimit()+100; i++ {
		// Peer added to peer handler.
		pid := addPeer(t, p, peers.PeerDisconnected)
		p.Scorers().BadResponsesScorer().Increment(pid)
	}
	p.Prune()

	for _, pr := range badPeers {
		assert.Equal(t, false, p.IsBad(pr), "peer with good ip is regarded as bad")
	}
}

// TestTrimmedOrderedPeers verifies that BestFinalized returns the majority
// finalized epoch and at most maxPeers peers, ordered by finalized epoch
// descending.
func TestTrimmedOrderedPeers(t *testing.T) {
	p := peers.NewStatus(context.Background(), &peers.StatusConfig{
		PeerLimit: 30,
		ScorerParams: &scorers.Config{
			BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
				Threshold: 1,
			},
		},
	})

	expectedTarget := types.Epoch(2)
	maxPeers := 3
	mockroot2 := [32]byte{}
	mockroot3 := [32]byte{}
	mockroot4 := [32]byte{}
	mockroot5 := [32]byte{}
	copy(mockroot2[:], "two")
	copy(mockroot3[:], "three")
	copy(mockroot4[:], "four")
	copy(mockroot5[:], "five")
	// Peer 1
	pid1 := addPeer(t, p, peers.PeerConnected)
	p.SetChainState(pid1, &pb.Status{
		HeadSlot:       3 * params.BeaconConfig().SlotsPerEpoch,
		FinalizedEpoch: 3,
		FinalizedRoot:  mockroot3[:],
	})
	// Peer 2
	pid2 := addPeer(t, p, peers.PeerConnected)
	p.SetChainState(pid2, &pb.Status{
		HeadSlot:       4 * params.BeaconConfig().SlotsPerEpoch,
		FinalizedEpoch: 4,
		FinalizedRoot:  mockroot4[:],
	})
	// Peer 3
	pid3 := addPeer(t, p, peers.PeerConnected)
	p.SetChainState(pid3, &pb.Status{
		HeadSlot:       5 * params.BeaconConfig().SlotsPerEpoch,
		FinalizedEpoch: 5,
		FinalizedRoot:  mockroot5[:],
	})
	// Peer 4
	pid4 := addPeer(t, p, peers.PeerConnected)
	p.SetChainState(pid4, &pb.Status{
		HeadSlot:       2 * params.BeaconConfig().SlotsPerEpoch,
		FinalizedEpoch: 2,
		FinalizedRoot:  mockroot2[:],
	})
	// Peer 5
	pid5 := addPeer(t, p, peers.PeerConnected)
	p.SetChainState(pid5, &pb.Status{
		HeadSlot:       2 * params.BeaconConfig().SlotsPerEpoch,
		FinalizedEpoch: 2,
		FinalizedRoot:  mockroot2[:],
	})

	target, pids := p.BestFinalized(maxPeers, 0)
	assert.Equal(t, expectedTarget, target, "Incorrect target epoch retrieved")
	assert.Equal(t, maxPeers, len(pids), "Incorrect number of peers retrieved")

	// Expect the returned list to be ordered by finalized epoch and trimmed to max peers.
	assert.Equal(t, pid3, pids[0], "Incorrect first peer")
	assert.Equal(t, pid2, pids[1], "Incorrect second peer")
	assert.Equal(t, pid1, pids[2], "Incorrect third peer")
}

// TestConcurrentPeerLimitHolds verifies that the maximum peer limit always
// exceeds the connected peer limit.
func TestConcurrentPeerLimitHolds(t *testing.T) {
	p := peers.NewStatus(context.Background(), &peers.StatusConfig{
		PeerLimit: 30,
		ScorerParams: &scorers.Config{
			BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
				Threshold: 1,
			},
		},
	})
	assert.Equal(t, true, uint64(p.MaxPeerLimit()) > p.ConnectedPeerLimit(), "max peer limit doesnt exceed connected peer limit")
}

// TestAtInboundPeerLimit verifies that IsAboveInboundLimit triggers only once
// enough inbound (not outbound) peers are connected.
func TestAtInboundPeerLimit(t *testing.T) {
	p := peers.NewStatus(context.Background(), &peers.StatusConfig{
		PeerLimit: 30,
		ScorerParams: &scorers.Config{
			BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
				Threshold: 1,
			},
		},
	})
	for i := 0; i < 15; i++ {
		// Peer added to peer handler.
		createPeer(t, p, nil, network.DirOutbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
	}
	assert.Equal(t, false, p.IsAboveInboundLimit(), "Inbound limit exceeded")
	for i := 0; i < 31; i++ {
		// Peer added to peer handler.
		createPeer(t, p, nil, network.DirInbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
	}
	assert.Equal(t, true, p.IsAboveInboundLimit(), "Inbound limit not exceeded")
}

// TestPrunePeers verifies that PeersToPrune selects only inbound peers above
// the limit, ordered by bad-response score descending.
func TestPrunePeers(t *testing.T) {
	p := peers.NewStatus(context.Background(), &peers.StatusConfig{
		PeerLimit: 30,
		ScorerParams: &scorers.Config{
			BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
				Threshold: 1,
			},
		},
	})
	for i := 0; i < 15; i++ {
		// Peer added to peer handler.
		createPeer(t, p, nil, network.DirOutbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
	}
	// Assert there are no prunable peers.
	peersToPrune := p.PeersToPrune()
	assert.Equal(t, 0, len(peersToPrune))

	for i := 0; i < 18; i++ {
		// Peer added to peer handler.
		createPeer(t, p, nil, network.DirInbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
	}

	// Assert there are the correct prunable peers.
	peersToPrune = p.PeersToPrune()
	assert.Equal(t, 3, len(peersToPrune))

	// Add in more peers.
	for i := 0; i < 13; i++ {
		// Peer added to peer handler.
		createPeer(t, p, nil, network.DirInbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
	}

	// Set up bad scores for inbound peers.
	inboundPeers := p.InboundConnected()
	for i, pid := range inboundPeers {
		modulo := i % 5
		// Increment bad scores for peers.
		for j := 0; j < modulo; j++ {
			p.Scorers().BadResponsesScorer().Increment(pid)
		}
	}
	// Assert all peers more than max are prunable.
	peersToPrune = p.PeersToPrune()
	assert.Equal(t, 16, len(peersToPrune))
	for _, pid := range peersToPrune {
		dir, err := p.Direction(pid)
		require.NoError(t, err)
		assert.Equal(t, network.DirInbound, dir)
	}

	// Ensure it is in the descending order.
	currCount, err := p.Scorers().BadResponsesScorer().Count(peersToPrune[0])
	require.NoError(t, err)
	for _, pid := range peersToPrune {
		count, err := p.Scorers().BadResponsesScorer().Count(pid)
		require.NoError(t, err)
		assert.Equal(t, true, currCount >= count)
		currCount = count
	}
}

// TestStatus_BestPeer is a table-driven test for BestFinalized: peers are
// selected by finalized epoch only (not head slot), peers at or below our own
// finalized epoch are ignored, and the result is capped at limitPeers.
func TestStatus_BestPeer(t *testing.T) {
	type peerConfig struct {
		headSlot       types.Slot
		finalizedEpoch types.Epoch
	}
	tests := []struct {
		name              string
		peers             []*peerConfig
		limitPeers        int
		ourFinalizedEpoch types.Epoch
		targetEpoch       types.Epoch
		// targetEpochSupport denotes how many peers support returned epoch.
		targetEpochSupport int
	}{
		{
			name: "head slot matches finalized epoch",
			peers: []*peerConfig{
				{finalizedEpoch: 4, headSlot: 4 * params.BeaconConfig().SlotsPerEpoch},
				{finalizedEpoch: 4, headSlot: 4 * params.BeaconConfig().SlotsPerEpoch},
				{finalizedEpoch: 3, headSlot: 3 * params.BeaconConfig().SlotsPerEpoch},
				{finalizedEpoch: 4, headSlot: 4 * params.BeaconConfig().SlotsPerEpoch},
				{finalizedEpoch: 4, headSlot: 4 * params.BeaconConfig().SlotsPerEpoch},
				{finalizedEpoch: 3, headSlot: 3 * params.BeaconConfig().SlotsPerEpoch},
			},
			limitPeers:         15,
			targetEpoch:        4,
			targetEpochSupport: 4,
		},
		{
			// Peers are compared using their finalized epoch, head should not affect peer selection.
			// Test case below is a regression case: to ensure that only epoch is used indeed.
			// (Function sorts peers, and on equal head slot, produced incorrect results).
			name: "head slots equal for peers with different finalized epochs",
			peers: []*peerConfig{
				{finalizedEpoch: 4, headSlot: 4 * params.BeaconConfig().SlotsPerEpoch},
				{finalizedEpoch: 4, headSlot: 4 * params.BeaconConfig().SlotsPerEpoch},
				{finalizedEpoch: 3, headSlot: 4 * params.BeaconConfig().SlotsPerEpoch},
				{finalizedEpoch: 4, headSlot: 4 * params.BeaconConfig().SlotsPerEpoch},
				{finalizedEpoch: 4, headSlot: 4 * params.BeaconConfig().SlotsPerEpoch},
				{finalizedEpoch: 3, headSlot: 4 * params.BeaconConfig().SlotsPerEpoch},
			},
			limitPeers:         15,
			targetEpoch:        4,
			targetEpochSupport: 4,
		},
		{
			name: "head slot significantly ahead of finalized epoch (long period of non-finality)",
			peers: []*peerConfig{
				{finalizedEpoch: 4, headSlot: 42 * params.BeaconConfig().SlotsPerEpoch},
				{finalizedEpoch: 4, headSlot: 42 * params.BeaconConfig().SlotsPerEpoch},
				{finalizedEpoch: 3, headSlot: 42 * params.BeaconConfig().SlotsPerEpoch},
				{finalizedEpoch: 4, headSlot: 42 * params.BeaconConfig().SlotsPerEpoch},
				{finalizedEpoch: 4, headSlot: 42 * params.BeaconConfig().SlotsPerEpoch},
				{finalizedEpoch: 3, headSlot: 42 * params.BeaconConfig().SlotsPerEpoch},
			},
			limitPeers:         15,
			targetEpoch:        4,
			targetEpochSupport: 4,
		},
		{
			name: "ignore lower epoch peers",
			peers: []*peerConfig{
				{finalizedEpoch: 4, headSlot: 41 * params.BeaconConfig().SlotsPerEpoch},
				{finalizedEpoch: 4, headSlot: 42 * params.BeaconConfig().SlotsPerEpoch},
				{finalizedEpoch: 3, headSlot: 43 * params.BeaconConfig().SlotsPerEpoch},
				{finalizedEpoch: 4, headSlot: 44 * params.BeaconConfig().SlotsPerEpoch},
				{finalizedEpoch: 4, headSlot: 45 * params.BeaconConfig().SlotsPerEpoch},
				{finalizedEpoch: 3, headSlot: 46 * params.BeaconConfig().SlotsPerEpoch},
				{finalizedEpoch: 6, headSlot: 6 * params.BeaconConfig().SlotsPerEpoch},
			},
			ourFinalizedEpoch:  5,
			limitPeers:         15,
			targetEpoch:        6,
			targetEpochSupport: 1,
		},
		{
			name: "combine peers from several epochs starting from epoch higher than ours",
			peers: []*peerConfig{
				{finalizedEpoch: 4, headSlot: 41 * params.BeaconConfig().SlotsPerEpoch},
				{finalizedEpoch: 4, headSlot: 42 * params.BeaconConfig().SlotsPerEpoch},
				{finalizedEpoch: 3, headSlot: 43 * params.BeaconConfig().SlotsPerEpoch},
				{finalizedEpoch: 4, headSlot: 44 * params.BeaconConfig().SlotsPerEpoch},
				{finalizedEpoch: 4, headSlot: 45 * params.BeaconConfig().SlotsPerEpoch},
				{finalizedEpoch: 3, headSlot: 46 * params.BeaconConfig().SlotsPerEpoch},
				{finalizedEpoch: 6, headSlot: 6 * params.BeaconConfig().SlotsPerEpoch},
				{finalizedEpoch: 6, headSlot: 6 * params.BeaconConfig().SlotsPerEpoch},
				{finalizedEpoch: 6, headSlot: 6 * params.BeaconConfig().SlotsPerEpoch},
				{finalizedEpoch: 7, headSlot: 7 * params.BeaconConfig().SlotsPerEpoch},
				{finalizedEpoch: 8, headSlot: 8 * params.BeaconConfig().SlotsPerEpoch},
			},
			ourFinalizedEpoch:  5,
			limitPeers:         15,
			targetEpoch:        6,
			targetEpochSupport: 5,
		},
		{
			name: "limit number of returned peers",
			peers: []*peerConfig{
				{finalizedEpoch: 4, headSlot: 41 * params.BeaconConfig().SlotsPerEpoch},
				{finalizedEpoch: 4, headSlot: 42 * params.BeaconConfig().SlotsPerEpoch},
				{finalizedEpoch: 3, headSlot: 43 * params.BeaconConfig().SlotsPerEpoch},
				{finalizedEpoch: 4, headSlot: 44 * params.BeaconConfig().SlotsPerEpoch},
				{finalizedEpoch: 4, headSlot: 45 * params.BeaconConfig().SlotsPerEpoch},
				{finalizedEpoch: 3, headSlot: 46 * params.BeaconConfig().SlotsPerEpoch},
				{finalizedEpoch: 6, headSlot: 6 * params.BeaconConfig().SlotsPerEpoch},
				{finalizedEpoch: 6, headSlot: 6 * params.BeaconConfig().SlotsPerEpoch},
				{finalizedEpoch: 6, headSlot: 6 * params.BeaconConfig().SlotsPerEpoch},
				{finalizedEpoch: 7, headSlot: 7 * params.BeaconConfig().SlotsPerEpoch},
				{finalizedEpoch: 8, headSlot: 8 * params.BeaconConfig().SlotsPerEpoch},
			},
			ourFinalizedEpoch:  5,
			limitPeers:         4,
			targetEpoch:        6,
			targetEpochSupport: 4,
		},
		{
			name: "handle epoch ties",
			peers: []*peerConfig{
				{finalizedEpoch: 6, headSlot: 6 * params.BeaconConfig().SlotsPerEpoch},
				{finalizedEpoch: 6, headSlot: 6 * params.BeaconConfig().SlotsPerEpoch},
				{finalizedEpoch: 6, headSlot: 6 * params.BeaconConfig().SlotsPerEpoch},
				{finalizedEpoch: 7, headSlot: 7 * params.BeaconConfig().SlotsPerEpoch},
				{finalizedEpoch: 8, headSlot: 8 * params.BeaconConfig().SlotsPerEpoch},
				{finalizedEpoch: 8, headSlot: 8 * params.BeaconConfig().SlotsPerEpoch},
				{finalizedEpoch: 8, headSlot: 8 * params.BeaconConfig().SlotsPerEpoch},
			},
			ourFinalizedEpoch:  5,
			limitPeers:         15,
			targetEpoch:        8,
			targetEpochSupport: 3,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			p := peers.NewStatus(context.Background(), &peers.StatusConfig{
				PeerLimit: 30,
				ScorerParams: &scorers.Config{
					BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{Threshold: 2},
				},
			})
			for _, peerConfig := range tt.peers {
				p.SetChainState(addPeer(t, p, peers.PeerConnected), &pb.Status{
					FinalizedEpoch: peerConfig.finalizedEpoch,
					HeadSlot:       peerConfig.headSlot,
				})
			}
			epoch, pids := p.BestFinalized(tt.limitPeers, tt.ourFinalizedEpoch)
			assert.Equal(t, tt.targetEpoch, epoch, "Unexpected epoch retrieved")
			assert.Equal(t, tt.targetEpochSupport, len(pids), "Unexpected number of peers supporting retrieved epoch")
		})
	}
}

// TestBestFinalized_returnsMaxValue verifies that BestFinalized never returns
// more than maxPeers peers even when many more agree on the epoch.
func TestBestFinalized_returnsMaxValue(t *testing.T) {
	maxBadResponses := 2
	maxPeers := 10
	p := peers.NewStatus(context.Background(), &peers.StatusConfig{
		PeerLimit: 30,
		ScorerParams: &scorers.Config{
			BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
				Threshold: maxBadResponses,
			},
		},
	})

	for i := 0; i <= maxPeers+100; i++ {
		p.Add(new(enr.Record), peer.ID(rune(i)), nil, network.DirOutbound)
		p.SetConnectionState(peer.ID(rune(i)), peers.PeerConnected)
		p.SetChainState(peer.ID(rune(i)), &pb.Status{
			FinalizedEpoch: 10,
		})
	}

	_, pids := p.BestFinalized(maxPeers, 0)
	assert.Equal(t, maxPeers, len(pids), "Wrong number of peers returned")
}

// TestStatus_BestNonFinalized verifies that BestNonFinalized returns the best
// epoch supported by at least minPeers peers above our epoch, with the
// matching peer IDs.
func TestStatus_BestNonFinalized(t *testing.T) {
	p := peers.NewStatus(context.Background(), &peers.StatusConfig{
		PeerLimit: 30,
		ScorerParams: &scorers.Config{
			BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
				Threshold: 2,
			},
		},
	})

	peerSlots := []types.Slot{32, 32, 32, 32, 235, 233, 258, 268, 270}
	for i, headSlot := range peerSlots {
		p.Add(new(enr.Record), peer.ID(rune(i)), nil, network.DirOutbound)
		p.SetConnectionState(peer.ID(rune(i)), peers.PeerConnected)
		p.SetChainState(peer.ID(rune(i)), &pb.Status{
			HeadSlot: headSlot,
		})
	}

	expectedEpoch := types.Epoch(8)
	retEpoch, pids := p.BestNonFinalized(3, 5)
	assert.Equal(t, expectedEpoch, retEpoch, "Incorrect Finalized epoch retrieved")
	assert.Equal(t, 3, len(pids), "Unexpected number of peers")
}

// TestStatus_CurrentEpoch verifies that HighestEpoch reports the maximum
// epoch implied by any connected peer's head slot.
func TestStatus_CurrentEpoch(t *testing.T) {
	maxBadResponses := 2
	p := peers.NewStatus(context.Background(), &peers.StatusConfig{
		PeerLimit: 30,
		ScorerParams: &scorers.Config{
			BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
				Threshold: maxBadResponses,
			},
		},
	})
	// Peer 1
	pid1 := addPeer(t, p, peers.PeerConnected)
	p.SetChainState(pid1, &pb.Status{
		HeadSlot: params.BeaconConfig().SlotsPerEpoch * 4,
	})
	// Peer 2
	pid2 := addPeer(t, p, peers.PeerConnected)
	p.SetChainState(pid2, &pb.Status{
		HeadSlot: params.BeaconConfig().SlotsPerEpoch * 5,
	})
	// Peer 3
	pid3 := addPeer(t, p, peers.PeerConnected)
	p.SetChainState(pid3, &pb.Status{
		HeadSlot: params.BeaconConfig().SlotsPerEpoch * 4,
	})

	assert.Equal(t, types.Epoch(5), p.HighestEpoch(), "Expected current epoch to be 5")
}

// TestInbound verifies that Inbound returns only the inbound-direction peer.
func TestInbound(t *testing.T) {
	p := peers.NewStatus(context.Background(), &peers.StatusConfig{
		PeerLimit: 30,
		ScorerParams: &scorers.Config{
			BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
				Threshold: 0,
			},
		},
	})
	addr, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/33333")
	require.NoError(t, err)
	inbound := createPeer(t, p, addr, network.DirInbound, peers.PeerConnected)
	createPeer(t, p, addr, network.DirOutbound, peers.PeerConnected)

	result := p.Inbound()
	require.Equal(t, 1, len(result))
	assert.Equal(t, inbound.Pretty(), result[0].Pretty())
}

// TestOutbound verifies that Outbound returns only the outbound-direction peer.
func TestOutbound(t *testing.T) {
	p := peers.NewStatus(context.Background(), &peers.StatusConfig{
		PeerLimit: 30,
		ScorerParams: &scorers.Config{
			BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
				Threshold: 0,
			},
		},
	})
	addr, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/33333")
	require.NoError(t, err)
	createPeer(t, p, addr, network.DirInbound, peers.PeerConnected)
	outbound := createPeer(t, p, addr, network.DirOutbound, peers.PeerConnected)

	result := p.Outbound()
	require.Equal(t, 1, len(result))
	assert.Equal(t, outbound.Pretty(), result[0].Pretty())
}

// addPeer is a helper to add a peer with a given connection state.
func addPeer(t *testing.T, p *peers.Status, state peerdata.PeerConnectionState) peer.ID {
	// Set up some peers with different states
	// 0x11 = sha1 multihash code, 0x04 = digest length; random bytes follow.
	mhBytes := []byte{0x11, 0x04}
	idBytes := make([]byte, 4)
	_, err := rand.Read(idBytes)
	require.NoError(t, err)
	mhBytes = append(mhBytes, idBytes...)
1027 id, err := peer.IDFromBytes(mhBytes) 1028 require.NoError(t, err) 1029 p.Add(new(enr.Record), id, nil, network.DirUnknown) 1030 p.SetConnectionState(id, state) 1031 p.SetMetadata(id, interfaces.WrappedMetadataV0(&pb.MetaDataV0{ 1032 SeqNumber: 0, 1033 Attnets: bitfield.NewBitvector64(), 1034 })) 1035 return id 1036 } 1037 1038 func createPeer(t *testing.T, p *peers.Status, addr ma.Multiaddr, 1039 dir network.Direction, state peerdata.PeerConnectionState) peer.ID { 1040 mhBytes := []byte{0x11, 0x04} 1041 idBytes := make([]byte, 4) 1042 _, err := rand.Read(idBytes) 1043 require.NoError(t, err) 1044 mhBytes = append(mhBytes, idBytes...) 1045 id, err := peer.IDFromBytes(mhBytes) 1046 require.NoError(t, err) 1047 p.Add(new(enr.Record), id, addr, dir) 1048 p.SetConnectionState(id, state) 1049 return id 1050 }