github.com/ari-anchor/sei-tendermint@v0.0.0-20230519144642-dc826b7b56bb/internal/p2p/pex/reactor_test.go

//nolint:unused
package pex_test

import (
	"context"
	"errors"
	"strings"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
	dbm "github.com/tendermint/tm-db"

	"github.com/ari-anchor/sei-tendermint/config"
	"github.com/ari-anchor/sei-tendermint/crypto/ed25519"
	"github.com/ari-anchor/sei-tendermint/internal/p2p"
	"github.com/ari-anchor/sei-tendermint/internal/p2p/p2ptest"
	"github.com/ari-anchor/sei-tendermint/internal/p2p/pex"
	"github.com/ari-anchor/sei-tendermint/libs/log"
	p2pproto "github.com/ari-anchor/sei-tendermint/proto/tendermint/p2p"
	"github.com/ari-anchor/sei-tendermint/types"
)

const (
	checkFrequency    = 500 * time.Millisecond
	defaultBufferSize = 2
	shortWait         = 5 * time.Second
	longWait          = 20 * time.Second

	firstNode  = 0
	secondNode = 1
	thirdNode  = 2
)

func TestReactorBasic(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	// start a network with one mock reactor and one "real" reactor
	testNet := setupNetwork(ctx, t, testOptions{
		MockNodes:  1,
		TotalNodes: 2,
	})
	testNet.connectAll(ctx, t)
	testNet.start(ctx, t)

	// assert that the mock node receives a request from the real node
	testNet.listenForRequest(ctx, t, secondNode, firstNode, shortWait)

	// assert that when a mock node sends a request it receives a response (and
	// the correct one)
	testNet.sendRequest(ctx, t, firstNode, secondNode)
	testNet.listenForResponse(ctx, t, secondNode, firstNode, shortWait, []p2pproto.PexAddress(nil))
}

func TestReactorConnectFullNetwork(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	testNet := setupNetwork(ctx, t, testOptions{
		TotalNodes: 4,
	})

	// connect every node to only one other node (connections are
	// bidirectional, so each node effectively ends up with two)
	testNet.connectN(ctx, t, 1)
	testNet.start(ctx, t)

	// assert that all nodes add each other in the network
	for idx := 0; idx < len(testNet.nodes); idx++ {
		testNet.requireNumberOfPeers(t, idx, len(testNet.nodes)-1, longWait)
	}
}

func TestReactorSendsRequestsTooOften(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	r := setupSingle(ctx, t)

	badNode := newNodeID(t, "b")

	r.pexInCh <- p2p.Envelope{
		From:    badNode,
		Message: &p2pproto.PexRequest{},
	}

	resp := <-r.pexOutCh
	msg, ok := resp.Message.(*p2pproto.PexResponse)
	require.True(t, ok)
	require.Empty(t, msg.Addresses)

	r.pexInCh <- p2p.Envelope{
		From:    badNode,
		Message: &p2pproto.PexRequest{},
	}

	peerErr := <-r.pexErrCh
	require.Error(t, peerErr.Err)
	require.Empty(t, r.pexOutCh)
	require.Contains(t, peerErr.Err.Error(), "sent PEX request too soon")
	require.Equal(t, badNode, peerErr.NodeID)
}
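// TestReactorSendsResponseWithoutRequest asserts that a peer sending an
// unsolicited PexResponse is treated as misbehaving: the receiving node
// should evict it.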
func TestReactorSendsResponseWithoutRequest(t *testing.T) {
	t.Skip("This test needs to be updated: https://github.com/ari-anchor/sei-tendermint/issue/7634")
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	testNet := setupNetwork(ctx, t, testOptions{
		MockNodes:  1,
		TotalNodes: 3,
	})
	testNet.connectAll(ctx, t)
	testNet.start(ctx, t)

	// firstNode sends the secondNode an unrequested response
	// NOTE: secondNode will send a request by default during startup so we send
	// two responses to counter that.
	testNet.sendResponse(ctx, t, firstNode, secondNode, []int{thirdNode})
	testNet.sendResponse(ctx, t, firstNode, secondNode, []int{thirdNode})

	// secondNode should evict the firstNode
	testNet.listenForPeerUpdate(ctx, t, secondNode, firstNode, p2p.PeerStatusDown, shortWait)
}

func TestReactorNeverSendsTooManyPeers(t *testing.T) {
	t.Skip("This test needs to be updated: https://github.com/ari-anchor/sei-tendermint/issue/7634")
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	testNet := setupNetwork(ctx, t, testOptions{
		MockNodes:  1,
		TotalNodes: 2,
	})
	testNet.connectAll(ctx, t)
	testNet.start(ctx, t)

	testNet.addNodes(ctx, t, 110)
	nodes := make([]int, 110)
	for i := 0; i < len(nodes); i++ {
		nodes[i] = i + 2
	}
	testNet.addAddresses(t, secondNode, nodes)

	// first we check that even though we have 110 peers, honest pex reactors
	// only send 100 (test if secondNode sends firstNode 100 addresses)
	testNet.pingAndlistenForNAddresses(ctx, t, secondNode, firstNode, shortWait, 100)
}

func TestReactorErrorsOnReceivingTooManyPeers(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	r := setupSingle(ctx, t)
	peer := p2p.NodeAddress{Protocol: p2p.MemoryProtocol, NodeID: randomNodeID()}
	added, err := r.manager.Add(peer)
	require.NoError(t, err)
	require.True(t, added)

	addresses := make([]p2pproto.PexAddress, 101)
	for i := 0; i < len(addresses); i++ {
		nodeAddress := p2p.NodeAddress{Protocol: p2p.MemoryProtocol, NodeID: randomNodeID()}
		addresses[i] = p2pproto.PexAddress{
			URL: nodeAddress.String(),
		}
	}

	r.peerCh <- p2p.PeerUpdate{
		NodeID: peer.NodeID,
		Status: p2p.PeerStatusUp,
	}

	select {
	// wait for a request and then send a response with too many addresses
	case req := <-r.pexOutCh:
		if _, ok := req.Message.(*p2pproto.PexRequest); !ok {
			t.Fatal("expected v2 pex request")
		}
		r.pexInCh <- p2p.Envelope{
			From: peer.NodeID,
			Message: &p2pproto.PexResponse{
				Addresses: addresses,
			},
		}

	case <-time.After(10 * time.Second):
		t.Fatal("pex failed to send a request within 10 seconds")
	}

	peerErr := <-r.pexErrCh
	require.Error(t, peerErr.Err)
	require.Empty(t, r.pexOutCh)
	require.Contains(t, peerErr.Err.Error(), "peer sent too many addresses")
	require.Equal(t, peer.NodeID, peerErr.NodeID)
}

func TestReactorSmallPeerStoreInALargeNetwork(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	testNet := setupNetwork(ctx, t, testOptions{
		TotalNodes:   8,
		MaxPeers:     4,
		MaxConnected: 3,
		BufferSize:   8,
		MaxRetryTime: 5 * time.Minute,
	})
	testNet.connectN(ctx, t, 1)
	testNet.start(ctx, t)

	// test that all nodes reach full capacity
	for _, nodeID := range testNet.nodes {
		require.Eventually(t, func() bool {
			// nolint:scopelint
			return testNet.network.Nodes[nodeID].PeerManager.PeerRatio() >= 0.9
		}, longWait, checkFrequency,
			"peer ratio is: %f", testNet.network.Nodes[nodeID].PeerManager.PeerRatio())
	}
}
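// TestReactorLargePeerStoreInASmallNetwork verifies that nodes whose peer
// store capacity far exceeds the network size still discover every other
// node.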
func TestReactorLargePeerStoreInASmallNetwork(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	testNet := setupNetwork(ctx, t, testOptions{
		TotalNodes:   3,
		MaxPeers:     25,
		MaxConnected: 25,
		BufferSize:   5,
		MaxRetryTime: 5 * time.Minute,
	})
	testNet.connectN(ctx, t, 1)
	testNet.start(ctx, t)

	// assert that all nodes add each other in the network
	for idx := 0; idx < len(testNet.nodes); idx++ {
		testNet.requireNumberOfPeers(t, idx, len(testNet.nodes)-1, longWait)
	}
}

func TestReactorWithNetworkGrowth(t *testing.T) {
	t.Skip("This test needs to be updated: https://github.com/ari-anchor/sei-tendermint/issue/7634")
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	testNet := setupNetwork(ctx, t, testOptions{
		TotalNodes: 5,
		BufferSize: 5,
	})
	testNet.connectAll(ctx, t)
	testNet.start(ctx, t)

	// assert that all nodes add each other in the network
	for idx := 0; idx < len(testNet.nodes); idx++ {
		testNet.requireNumberOfPeers(t, idx, len(testNet.nodes)-1, shortWait)
	}

	// now we inject 10 more nodes
	testNet.addNodes(ctx, t, 10)
	for i := 5; i < testNet.total; i++ {
		node := testNet.nodes[i]
		require.NoError(t, testNet.reactors[node].Start(ctx))
		require.True(t, testNet.reactors[node].IsRunning())
		// we connect all new nodes to a single entry point and check that the
		// node can distribute the addresses to all the others
		testNet.connectPeers(ctx, t, 0, i)
	}
	require.Len(t, testNet.reactors, 15)

	// assert that all nodes add each other in the network
	for idx := 0; idx < len(testNet.nodes); idx++ {
		testNet.requireNumberOfPeers(t, idx, len(testNet.nodes)-1, longWait)
	}
}

// singleTestReactor wires a single pex reactor directly to in-memory channels
// so tests can drive it without a full network.
type singleTestReactor struct {
	reactor  *pex.Reactor
	pexInCh  chan p2p.Envelope
	pexOutCh chan p2p.Envelope
	pexErrCh chan p2p.PeerError
	pexCh    *p2p.Channel
	peerCh   chan p2p.PeerUpdate
	manager  *p2p.PeerManager
}

func setupSingle(ctx context.Context, t *testing.T) *singleTestReactor {
	t.Helper()
	nodeID := newNodeID(t, "a")
	chBuf := 2
	pexInCh := make(chan p2p.Envelope, chBuf)
	pexOutCh := make(chan p2p.Envelope, chBuf)
	pexErrCh := make(chan p2p.PeerError, chBuf)
	pexCh := p2p.NewChannel(
		p2p.ChannelID(pex.PexChannel),
		pexInCh,
		pexOutCh,
		pexErrCh,
	)

	peerCh := make(chan p2p.PeerUpdate, chBuf)
	peerUpdates := p2p.NewPeerUpdates(peerCh, chBuf)
	peerManager, err := p2p.NewPeerManager(log.NewNopLogger(), nodeID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
	require.NoError(t, err)

	reactor := pex.NewReactor(
		log.NewNopLogger(),
		peerManager,
		func(_ context.Context) *p2p.PeerUpdates { return peerUpdates },
		make(chan struct{}),
		config.DefaultSelfRemediationConfig(),
	)
	reactor.SetChannel(pexCh)

	require.NoError(t, reactor.Start(ctx))
	t.Cleanup(reactor.Wait)

	return &singleTestReactor{
		reactor:  reactor,
		pexInCh:  pexInCh,
		pexOutCh: pexOutCh,
		pexErrCh: pexErrCh,
		pexCh:    pexCh,
		peerCh:   peerCh,
		manager:  peerManager,
	}
}

type reactorTestSuite struct {
	network *p2ptest.Network
	logger  log.Logger

	reactors    map[types.NodeID]*pex.Reactor
	pexChannels map[types.NodeID]*p2p.Channel

	peerChans   map[types.NodeID]chan p2p.PeerUpdate
	peerUpdates map[types.NodeID]*p2p.PeerUpdates

	nodes []types.NodeID
	mocks []types.NodeID
	total int
	opts  testOptions
}
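// testOptions configures the simulated network. MockNodes of the TotalNodes
// are hollow nodes driven directly by the test; the remainder run real PEX
// reactors.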
type testOptions struct {
	MockNodes    int
	TotalNodes   int
	BufferSize   int
	MaxPeers     uint16
	MaxConnected uint16
	MaxRetryTime time.Duration
}

// setupNetwork sets up a test suite with a network of nodes. Mock nodes are
// hollow nodes that the test can listen and send on directly.
func setupNetwork(ctx context.Context, t *testing.T, opts testOptions) *reactorTestSuite {
	t.Helper()

	require.Greater(t, opts.TotalNodes, opts.MockNodes)
	if opts.BufferSize == 0 {
		opts.BufferSize = defaultBufferSize
	}
	networkOpts := p2ptest.NetworkOptions{
		NumNodes:   opts.TotalNodes,
		BufferSize: opts.BufferSize,
		NodeOpts: p2ptest.NodeOptions{
			MaxPeers:     opts.MaxPeers,
			MaxConnected: opts.MaxConnected,
			MaxRetryTime: opts.MaxRetryTime,
		},
	}
	chBuf := opts.BufferSize
	realNodes := opts.TotalNodes - opts.MockNodes

	rts := &reactorTestSuite{
		logger:      log.NewNopLogger().With("testCase", t.Name()),
		network:     p2ptest.MakeNetwork(ctx, t, networkOpts),
		reactors:    make(map[types.NodeID]*pex.Reactor, realNodes),
		pexChannels: make(map[types.NodeID]*p2p.Channel, opts.TotalNodes),
		peerChans:   make(map[types.NodeID]chan p2p.PeerUpdate, opts.TotalNodes),
		peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, opts.TotalNodes),
		total:       opts.TotalNodes,
		opts:        opts,
	}

	// NOTE: we don't assert that the channels get drained after stopping the
	// reactor
	rts.pexChannels = rts.network.MakeChannelsNoCleanup(ctx, t, pex.ChannelDescriptor())

	idx := 0
	for nodeID := range rts.network.Nodes {
		// copy the loop variable to avoid capturing the range variable by
		// reference
		nodeID := nodeID

		rts.peerChans[nodeID] = make(chan p2p.PeerUpdate, chBuf)
		rts.peerUpdates[nodeID] = p2p.NewPeerUpdates(rts.peerChans[nodeID], chBuf)
		rts.network.Nodes[nodeID].PeerManager.Register(ctx, rts.peerUpdates[nodeID])

		// the first nodes in the array are always mock nodes
		if idx < opts.MockNodes {
			rts.mocks = append(rts.mocks, nodeID)
		} else {
			rts.reactors[nodeID] = pex.NewReactor(
				rts.logger.With("nodeID", nodeID),
				rts.network.Nodes[nodeID].PeerManager,
				func(_ context.Context) *p2p.PeerUpdates { return rts.peerUpdates[nodeID] },
				make(chan struct{}),
				config.DefaultSelfRemediationConfig(),
			)
			rts.reactors[nodeID].SetChannel(rts.pexChannels[nodeID])
		}
		rts.nodes = append(rts.nodes, nodeID)

		idx++
	}

	require.Len(t, rts.reactors, realNodes)

	t.Cleanup(func() {
		for _, reactor := range rts.reactors {
			if reactor.IsRunning() {
				reactor.Wait()
				require.False(t, reactor.IsRunning())
			}
		}
	})

	return rts
}

// start starts up the pex reactors for each node
func (r *reactorTestSuite) start(ctx context.Context, t *testing.T) {
	t.Helper()

	for name, reactor := range r.reactors {
		require.NoError(t, reactor.Start(ctx))
		require.True(t, reactor.IsRunning())
		t.Log("started", name)
	}
}

// addNodes grows the network with additional real reactor nodes; the caller
// is responsible for starting them
func (r *reactorTestSuite) addNodes(ctx context.Context, t *testing.T, nodes int) {
	t.Helper()

	for i := 0; i < nodes; i++ {
		node := r.network.MakeNode(ctx, t, p2ptest.NodeOptions{
			MaxPeers:     r.opts.MaxPeers,
			MaxConnected: r.opts.MaxConnected,
			MaxRetryTime: r.opts.MaxRetryTime,
		})
		r.network.Nodes[node.NodeID] = node
		nodeID := node.NodeID
		r.pexChannels[nodeID] = node.MakeChannelNoCleanup(ctx, t, pex.ChannelDescriptor())
		r.peerChans[nodeID] = make(chan p2p.PeerUpdate, r.opts.BufferSize)
		r.peerUpdates[nodeID] = p2p.NewPeerUpdates(r.peerChans[nodeID], r.opts.BufferSize)
		r.network.Nodes[nodeID].PeerManager.Register(ctx, r.peerUpdates[nodeID])

		r.reactors[nodeID] = pex.NewReactor(
			r.logger.With("nodeID", nodeID),
			r.network.Nodes[nodeID].PeerManager,
			func(_ context.Context) *p2p.PeerUpdates { return r.peerUpdates[nodeID] },
			make(chan struct{}),
			config.DefaultSelfRemediationConfig(),
		)
		r.nodes = append(r.nodes, nodeID)
		r.total++
	}
}
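// listenFor consumes envelopes received on the node's PEX channel until
// conditional matches an envelope and assertion passes for it, or waitPeriod
// elapses, in which case the test fails.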
func (r *reactorTestSuite) listenFor(
	ctx context.Context,
	t *testing.T,
	node types.NodeID,
	conditional func(msg *p2p.Envelope) bool,
	assertion func(t *testing.T, msg *p2p.Envelope) bool,
	waitPeriod time.Duration,
) {
	ctx, cancel := context.WithTimeout(ctx, waitPeriod)
	defer cancel()
	iter := r.pexChannels[node].Receive(ctx)
	for iter.Next(ctx) {
		envelope := iter.Envelope()
		if conditional(envelope) && assertion(t, envelope) {
			return
		}
	}

	if errors.Is(ctx.Err(), context.DeadlineExceeded) {
		require.Fail(t, "timed out waiting for message",
			"node=%v, waitPeriod=%s", node, waitPeriod)
	}
}

func (r *reactorTestSuite) listenForRequest(ctx context.Context, t *testing.T, fromNode, toNode int, waitPeriod time.Duration) {
	to, from := r.checkNodePair(t, toNode, fromNode)
	conditional := func(msg *p2p.Envelope) bool {
		_, ok := msg.Message.(*p2pproto.PexRequest)
		return ok && msg.From == from
	}
	assertion := func(t *testing.T, msg *p2p.Envelope) bool {
		require.Equal(t, &p2pproto.PexRequest{}, msg.Message)
		return true
	}
	r.listenFor(ctx, t, to, conditional, assertion, waitPeriod)
}

// pingAndlistenForNAddresses keeps requesting addresses from toNode until a
// response carrying exactly the expected number of addresses arrives, or the
// wait period elapses
func (r *reactorTestSuite) pingAndlistenForNAddresses(
	ctx context.Context,
	t *testing.T,
	fromNode, toNode int,
	waitPeriod time.Duration,
	addresses int,
) {
	t.Helper()

	to, from := r.checkNodePair(t, toNode, fromNode)
	conditional := func(msg *p2p.Envelope) bool {
		_, ok := msg.Message.(*p2pproto.PexResponse)
		return ok && msg.From == from
	}
	assertion := func(t *testing.T, msg *p2p.Envelope) bool {
		m, ok := msg.Message.(*p2pproto.PexResponse)
		if !ok {
			require.Fail(t, "expected pex response v2")
			return true
		}
		// assert that we received the expected number of addresses
		if len(m.Addresses) == addresses {
			return true
		}
		// if we didn't get the right length, we wait and send the
		// request again
		time.Sleep(300 * time.Millisecond)
		r.sendRequest(ctx, t, toNode, fromNode)
		return false
	}
	r.sendRequest(ctx, t, toNode, fromNode)
	r.listenFor(ctx, t, to, conditional, assertion, waitPeriod)
}

func (r *reactorTestSuite) listenForResponse(
	ctx context.Context,
	t *testing.T,
	fromNode, toNode int,
	waitPeriod time.Duration,
	addresses []p2pproto.PexAddress,
) {
	to, from := r.checkNodePair(t, toNode, fromNode)
	conditional := func(msg *p2p.Envelope) bool {
		_, ok := msg.Message.(*p2pproto.PexResponse)
		return ok && msg.From == from
	}
	assertion := func(t *testing.T, msg *p2p.Envelope) bool {
		require.Equal(t, &p2pproto.PexResponse{Addresses: addresses}, msg.Message)
		return true
	}
	r.listenFor(ctx, t, to, conditional, assertion, waitPeriod)
}
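// listenForPeerUpdate subscribes to onNode's peer manager and waits until it
// reports withNode with the given status, failing after waitPeriod.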
func (r *reactorTestSuite) listenForPeerUpdate(
	ctx context.Context,
	t *testing.T,
	onNode, withNode int,
	status p2p.PeerStatus,
	waitPeriod time.Duration,
) {
	on, with := r.checkNodePair(t, onNode, withNode)
	sub := r.network.Nodes[on].PeerManager.Subscribe(ctx)
	timesUp := time.After(waitPeriod)
	for {
		select {
		case <-ctx.Done():
			require.Fail(t, "operation canceled")
			return
		case peerUpdate := <-sub.Updates():
			if peerUpdate.NodeID == with {
				require.Equal(t, status, peerUpdate.Status)
				return
			}

		case <-timesUp:
			require.Fail(t, "timed out waiting for peer status", "%v with status %v",
				with, status)
			return
		}
	}
}

func (r *reactorTestSuite) getAddressesFor(nodes []int) []p2pproto.PexAddress {
	addresses := make([]p2pproto.PexAddress, len(nodes))
	for idx, node := range nodes {
		nodeID := r.nodes[node]
		addresses[idx] = p2pproto.PexAddress{
			URL: r.network.Nodes[nodeID].NodeAddress.String(),
		}
	}
	return addresses
}

func (r *reactorTestSuite) sendRequest(ctx context.Context, t *testing.T, fromNode, toNode int) {
	t.Helper()
	to, from := r.checkNodePair(t, toNode, fromNode)
	require.NoError(t, r.pexChannels[from].Send(ctx, p2p.Envelope{
		To:      to,
		Message: &p2pproto.PexRequest{},
	}))
}

func (r *reactorTestSuite) sendResponse(
	ctx context.Context,
	t *testing.T,
	fromNode, toNode int,
	withNodes []int,
) {
	t.Helper()
	from, to := r.checkNodePair(t, fromNode, toNode)
	addrs := r.getAddressesFor(withNodes)
	require.NoError(t, r.pexChannels[from].Send(ctx, p2p.Envelope{
		To: to,
		Message: &p2pproto.PexResponse{
			Addresses: addrs,
		},
	}))
}

// requireNumberOfPeers waits until the node at nodeIndex knows about at least
// numPeers peers
func (r *reactorTestSuite) requireNumberOfPeers(
	t *testing.T,
	nodeIndex, numPeers int,
	waitPeriod time.Duration,
) {
	t.Helper()
	require.Eventuallyf(t, func() bool {
		actualNumPeers := len(r.network.Nodes[r.nodes[nodeIndex]].PeerManager.Peers())
		return actualNumPeers >= numPeers
	}, waitPeriod, checkFrequency, "node failed to connect to the expected number of peers "+
		"index=%d, node=%q, waitPeriod=%s expected=%d actual=%d",
		nodeIndex, r.nodes[nodeIndex], waitPeriod, numPeers,
		len(r.network.Nodes[r.nodes[nodeIndex]].PeerManager.Peers()),
	)
}

func (r *reactorTestSuite) connectAll(ctx context.Context, t *testing.T) {
	r.connectN(ctx, t, r.total-1)
}

// connectN connects every node with n other nodes
func (r *reactorTestSuite) connectN(ctx context.Context, t *testing.T, n int) {
	if n >= r.total {
		require.Fail(t, "connectN: n must be less than the size of the network")
	}

	for i := 0; i < r.total; i++ {
		for j := 0; j < n; j++ {
			r.connectPeers(ctx, t, i, (i+j+1)%r.total)
		}
	}
}

// connectPeers connects node1 to node2
func (r *reactorTestSuite) connectPeers(ctx context.Context, t *testing.T, sourceNode, targetNode int) {
	t.Helper()
	node1, node2 := r.checkNodePair(t, sourceNode, targetNode)

	n1 := r.network.Nodes[node1]
	if n1 == nil {
		require.Fail(t, "connectPeers: source node %v is not part of the testnet", node1)
		return
	}

	n2 := r.network.Nodes[node2]
	if n2 == nil {
		require.Fail(t, "connectPeers: target node %v is not part of the testnet", node2)
		return
	}

	sourceSub := n1.PeerManager.Subscribe(ctx)
	targetSub := n2.PeerManager.Subscribe(ctx)

	sourceAddress := n1.NodeAddress
	targetAddress := n2.NodeAddress

	added, err := n1.PeerManager.Add(targetAddress)
	require.NoError(t, err)

	if !added {
		return
	}

	select {
	case peerUpdate := <-targetSub.Updates():
		require.Equal(t, node1, peerUpdate.NodeID)
		require.Equal(t, p2p.PeerStatusUp, peerUpdate.Status)
	case <-time.After(2 * time.Second):
		require.Fail(t, "timed out waiting for peer", "%v accepting %v",
			targetNode, sourceNode)
	}
	select {
	case peerUpdate := <-sourceSub.Updates():
		require.Equal(t, node2, peerUpdate.NodeID)
		require.Equal(t, p2p.PeerStatusUp, peerUpdate.Status)
	case <-time.After(2 * time.Second):
		require.Fail(t, "timed out waiting for peer", "%v dialing %v",
			sourceNode, targetNode)
	}

	added, err = n2.PeerManager.Add(sourceAddress)
	require.NoError(t, err)
	require.True(t, added)
}
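// checkNodePair asserts that the two test indices are distinct and in range,
// and resolves them to node IDs.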
func (r *reactorTestSuite) checkNodePair(t *testing.T, first, second int) (types.NodeID, types.NodeID) {
	require.NotEqual(t, first, second)
	require.Less(t, first, r.total)
	require.Less(t, second, r.total)
	return r.nodes[first], r.nodes[second]
}

// addAddresses seeds the given node's peer manager with the addresses of the
// nodes at the given indices
func (r *reactorTestSuite) addAddresses(t *testing.T, node int, addrs []int) {
	peerManager := r.network.Nodes[r.nodes[node]].PeerManager
	for _, addr := range addrs {
		require.Less(t, addr, r.total)
		address := r.network.Nodes[r.nodes[addr]].NodeAddress
		added, err := peerManager.Add(address)
		require.NoError(t, err)
		require.True(t, added)
	}
}

func newNodeID(t *testing.T, id string) types.NodeID {
	nodeID, err := types.NewNodeID(strings.Repeat(id, 2*types.NodeIDByteLength))
	require.NoError(t, err)
	return nodeID
}

func randomNodeID() types.NodeID {
	return types.NodeIDFromPubKey(ed25519.GenPrivKey().PubKey())
}
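// The sketch below illustrates how the harness above composes; it is not part
// of the original suite. It mirrors TestReactorBasic on a hypothetical
// three-node network (one mock node, two real reactors) and is skipped so it
// never runs as-is.
func TestReactorHarnessSketch(t *testing.T) {
	t.Skip("illustrative sketch of the test harness, not an active test")
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// one mock node that the test drives directly, two real reactors
	testNet := setupNetwork(ctx, t, testOptions{MockNodes: 1, TotalNodes: 3})
	testNet.connectAll(ctx, t)
	testNet.start(ctx, t)

	// the real secondNode should ask the mock firstNode for addresses on
	// startup
	testNet.listenForRequest(ctx, t, secondNode, firstNode, shortWait)

	// the mock node can also query the real node directly
	testNet.sendRequest(ctx, t, firstNode, secondNode)
}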