github.com/devwanda/aphelion-staking@v0.33.9/p2p/switch_test.go (about) 1 package p2p 2 3 import ( 4 "bytes" 5 "errors" 6 "fmt" 7 "io" 8 "io/ioutil" 9 "net" 10 "net/http" 11 "net/http/httptest" 12 "regexp" 13 "strconv" 14 "sync" 15 "sync/atomic" 16 "testing" 17 "time" 18 19 "github.com/prometheus/client_golang/prometheus/promhttp" 20 "github.com/stretchr/testify/assert" 21 "github.com/stretchr/testify/require" 22 23 "github.com/devwanda/aphelion-staking/config" 24 "github.com/devwanda/aphelion-staking/crypto/ed25519" 25 "github.com/devwanda/aphelion-staking/libs/log" 26 "github.com/devwanda/aphelion-staking/p2p/conn" 27 ) 28 29 var ( 30 cfg *config.P2PConfig 31 ) 32 33 func init() { 34 cfg = config.DefaultP2PConfig() 35 cfg.PexReactor = true 36 cfg.AllowDuplicateIP = true 37 } 38 39 type PeerMessage struct { 40 PeerID ID 41 Bytes []byte 42 Counter int 43 } 44 45 type TestReactor struct { 46 BaseReactor 47 48 mtx sync.Mutex 49 channels []*conn.ChannelDescriptor 50 logMessages bool 51 msgsCounter int 52 msgsReceived map[byte][]PeerMessage 53 } 54 55 func NewTestReactor(channels []*conn.ChannelDescriptor, logMessages bool) *TestReactor { 56 tr := &TestReactor{ 57 channels: channels, 58 logMessages: logMessages, 59 msgsReceived: make(map[byte][]PeerMessage), 60 } 61 tr.BaseReactor = *NewBaseReactor("TestReactor", tr) 62 tr.SetLogger(log.TestingLogger()) 63 return tr 64 } 65 66 func (tr *TestReactor) GetChannels() []*conn.ChannelDescriptor { 67 return tr.channels 68 } 69 70 func (tr *TestReactor) AddPeer(peer Peer) {} 71 72 func (tr *TestReactor) RemovePeer(peer Peer, reason interface{}) {} 73 74 func (tr *TestReactor) Receive(chID byte, peer Peer, msgBytes []byte) { 75 if tr.logMessages { 76 tr.mtx.Lock() 77 defer tr.mtx.Unlock() 78 //fmt.Printf("Received: %X, %X\n", chID, msgBytes) 79 tr.msgsReceived[chID] = append(tr.msgsReceived[chID], PeerMessage{peer.ID(), msgBytes, tr.msgsCounter}) 80 tr.msgsCounter++ 81 } 82 } 83 84 func (tr *TestReactor) getMsgs(chID byte) 
[]PeerMessage { 85 tr.mtx.Lock() 86 defer tr.mtx.Unlock() 87 return tr.msgsReceived[chID] 88 } 89 90 //----------------------------------------------------------------------------- 91 92 // convenience method for creating two switches connected to each other. 93 // XXX: note this uses net.Pipe and not a proper TCP conn 94 func MakeSwitchPair(t testing.TB, initSwitch func(int, *Switch) *Switch) (*Switch, *Switch) { 95 // Create two switches that will be interconnected. 96 switches := MakeConnectedSwitches(cfg, 2, initSwitch, Connect2Switches) 97 return switches[0], switches[1] 98 } 99 100 func initSwitchFunc(i int, sw *Switch) *Switch { 101 sw.SetAddrBook(&addrBookMock{ 102 addrs: make(map[string]struct{}), 103 ourAddrs: make(map[string]struct{})}) 104 105 // Make two reactors of two channels each 106 sw.AddReactor("foo", NewTestReactor([]*conn.ChannelDescriptor{ 107 {ID: byte(0x00), Priority: 10}, 108 {ID: byte(0x01), Priority: 10}, 109 }, true)) 110 sw.AddReactor("bar", NewTestReactor([]*conn.ChannelDescriptor{ 111 {ID: byte(0x02), Priority: 10}, 112 {ID: byte(0x03), Priority: 10}, 113 }, true)) 114 115 return sw 116 } 117 118 func TestSwitches(t *testing.T) { 119 s1, s2 := MakeSwitchPair(t, initSwitchFunc) 120 defer s1.Stop() 121 defer s2.Stop() 122 123 if s1.Peers().Size() != 1 { 124 t.Errorf("expected exactly 1 peer in s1, got %v", s1.Peers().Size()) 125 } 126 if s2.Peers().Size() != 1 { 127 t.Errorf("expected exactly 1 peer in s2, got %v", s2.Peers().Size()) 128 } 129 130 // Lets send some messages 131 ch0Msg := []byte("channel zero") 132 ch1Msg := []byte("channel foo") 133 ch2Msg := []byte("channel bar") 134 135 s1.Broadcast(byte(0x00), ch0Msg) 136 s1.Broadcast(byte(0x01), ch1Msg) 137 s1.Broadcast(byte(0x02), ch2Msg) 138 139 assertMsgReceivedWithTimeout(t, 140 ch0Msg, 141 byte(0x00), 142 s2.Reactor("foo").(*TestReactor), 10*time.Millisecond, 5*time.Second) 143 assertMsgReceivedWithTimeout(t, 144 ch1Msg, 145 byte(0x01), 146 s2.Reactor("foo").(*TestReactor), 
10*time.Millisecond, 5*time.Second) 147 assertMsgReceivedWithTimeout(t, 148 ch2Msg, 149 byte(0x02), 150 s2.Reactor("bar").(*TestReactor), 10*time.Millisecond, 5*time.Second) 151 } 152 153 func assertMsgReceivedWithTimeout( 154 t *testing.T, 155 msgBytes []byte, 156 channel byte, 157 reactor *TestReactor, 158 checkPeriod, 159 timeout time.Duration, 160 ) { 161 ticker := time.NewTicker(checkPeriod) 162 for { 163 select { 164 case <-ticker.C: 165 msgs := reactor.getMsgs(channel) 166 if len(msgs) > 0 { 167 if !bytes.Equal(msgs[0].Bytes, msgBytes) { 168 t.Fatalf("Unexpected message bytes. Wanted: %X, Got: %X", msgBytes, msgs[0].Bytes) 169 } 170 return 171 } 172 173 case <-time.After(timeout): 174 t.Fatalf("Expected to have received 1 message in channel #%v, got zero", channel) 175 } 176 } 177 } 178 179 func TestSwitchFiltersOutItself(t *testing.T) { 180 s1 := MakeSwitch(cfg, 1, "127.0.0.1", "123.123.123", initSwitchFunc) 181 182 // simulate s1 having a public IP by creating a remote peer with the same ID 183 rp := &remotePeer{PrivKey: s1.nodeKey.PrivKey, Config: cfg} 184 rp.Start() 185 186 // addr should be rejected in addPeer based on the same ID 187 err := s1.DialPeerWithAddress(rp.Addr()) 188 if assert.Error(t, err) { 189 if err, ok := err.(ErrRejected); ok { 190 if !err.IsSelf() { 191 t.Errorf("expected self to be rejected") 192 } 193 } else { 194 t.Errorf("expected ErrRejected") 195 } 196 } 197 198 assert.True(t, s1.addrBook.OurAddress(rp.Addr())) 199 assert.False(t, s1.addrBook.HasAddress(rp.Addr())) 200 201 rp.Stop() 202 203 assertNoPeersAfterTimeout(t, s1, 100*time.Millisecond) 204 } 205 206 func TestSwitchPeerFilter(t *testing.T) { 207 var ( 208 filters = []PeerFilterFunc{ 209 func(_ IPeerSet, _ Peer) error { return nil }, 210 func(_ IPeerSet, _ Peer) error { return fmt.Errorf("denied") }, 211 func(_ IPeerSet, _ Peer) error { return nil }, 212 } 213 sw = MakeSwitch( 214 cfg, 215 1, 216 "testing", 217 "123.123.123", 218 initSwitchFunc, 219 
SwitchPeerFilters(filters...), 220 ) 221 ) 222 defer sw.Stop() 223 224 // simulate remote peer 225 rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} 226 rp.Start() 227 defer rp.Stop() 228 229 p, err := sw.transport.Dial(*rp.Addr(), peerConfig{ 230 chDescs: sw.chDescs, 231 onPeerError: sw.StopPeerForError, 232 isPersistent: sw.IsPeerPersistent, 233 reactorsByCh: sw.reactorsByCh, 234 }) 235 if err != nil { 236 t.Fatal(err) 237 } 238 239 err = sw.addPeer(p) 240 if err, ok := err.(ErrRejected); ok { 241 if !err.IsFiltered() { 242 t.Errorf("expected peer to be filtered") 243 } 244 } else { 245 t.Errorf("expected ErrRejected") 246 } 247 } 248 249 func TestSwitchPeerFilterTimeout(t *testing.T) { 250 var ( 251 filters = []PeerFilterFunc{ 252 func(_ IPeerSet, _ Peer) error { 253 time.Sleep(10 * time.Millisecond) 254 return nil 255 }, 256 } 257 sw = MakeSwitch( 258 cfg, 259 1, 260 "testing", 261 "123.123.123", 262 initSwitchFunc, 263 SwitchFilterTimeout(5*time.Millisecond), 264 SwitchPeerFilters(filters...), 265 ) 266 ) 267 defer sw.Stop() 268 269 // simulate remote peer 270 rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} 271 rp.Start() 272 defer rp.Stop() 273 274 p, err := sw.transport.Dial(*rp.Addr(), peerConfig{ 275 chDescs: sw.chDescs, 276 onPeerError: sw.StopPeerForError, 277 isPersistent: sw.IsPeerPersistent, 278 reactorsByCh: sw.reactorsByCh, 279 }) 280 if err != nil { 281 t.Fatal(err) 282 } 283 284 err = sw.addPeer(p) 285 if _, ok := err.(ErrFilterTimeout); !ok { 286 t.Errorf("expected ErrFilterTimeout") 287 } 288 } 289 290 func TestSwitchPeerFilterDuplicate(t *testing.T) { 291 sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc) 292 sw.Start() 293 defer sw.Stop() 294 295 // simulate remote peer 296 rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} 297 rp.Start() 298 defer rp.Stop() 299 300 p, err := sw.transport.Dial(*rp.Addr(), peerConfig{ 301 chDescs: sw.chDescs, 302 onPeerError: sw.StopPeerForError, 303 
isPersistent: sw.IsPeerPersistent, 304 reactorsByCh: sw.reactorsByCh, 305 }) 306 if err != nil { 307 t.Fatal(err) 308 } 309 310 if err := sw.addPeer(p); err != nil { 311 t.Fatal(err) 312 } 313 314 err = sw.addPeer(p) 315 if errRej, ok := err.(ErrRejected); ok { 316 if !errRej.IsDuplicate() { 317 t.Errorf("expected peer to be duplicate. got %v", errRej) 318 } 319 } else { 320 t.Errorf("expected ErrRejected, got %v", err) 321 } 322 } 323 324 func assertNoPeersAfterTimeout(t *testing.T, sw *Switch, timeout time.Duration) { 325 time.Sleep(timeout) 326 if sw.Peers().Size() != 0 { 327 t.Fatalf("Expected %v to not connect to some peers, got %d", sw, sw.Peers().Size()) 328 } 329 } 330 331 func TestSwitchStopsNonPersistentPeerOnError(t *testing.T) { 332 assert, require := assert.New(t), require.New(t) 333 334 sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc) 335 err := sw.Start() 336 if err != nil { 337 t.Error(err) 338 } 339 defer sw.Stop() 340 341 // simulate remote peer 342 rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} 343 rp.Start() 344 defer rp.Stop() 345 346 p, err := sw.transport.Dial(*rp.Addr(), peerConfig{ 347 chDescs: sw.chDescs, 348 onPeerError: sw.StopPeerForError, 349 isPersistent: sw.IsPeerPersistent, 350 reactorsByCh: sw.reactorsByCh, 351 }) 352 require.Nil(err) 353 354 err = sw.addPeer(p) 355 require.Nil(err) 356 357 require.NotNil(sw.Peers().Get(rp.ID())) 358 359 // simulate failure by closing connection 360 p.(*peer).CloseConn() 361 362 assertNoPeersAfterTimeout(t, sw, 100*time.Millisecond) 363 assert.False(p.IsRunning()) 364 } 365 366 func TestSwitchStopPeerForError(t *testing.T) { 367 s := httptest.NewServer(promhttp.Handler()) 368 defer s.Close() 369 370 scrapeMetrics := func() string { 371 resp, err := http.Get(s.URL) 372 assert.NoError(t, err) 373 defer resp.Body.Close() 374 buf, _ := ioutil.ReadAll(resp.Body) 375 return string(buf) 376 } 377 378 namespace, subsystem, name := 
config.TestInstrumentationConfig().Namespace, MetricsSubsystem, "peers" 379 re := regexp.MustCompile(namespace + `_` + subsystem + `_` + name + ` ([0-9\.]+)`) 380 peersMetricValue := func() float64 { 381 matches := re.FindStringSubmatch(scrapeMetrics()) 382 f, _ := strconv.ParseFloat(matches[1], 64) 383 return f 384 } 385 386 p2pMetrics := PrometheusMetrics(namespace) 387 388 // make two connected switches 389 sw1, sw2 := MakeSwitchPair(t, func(i int, sw *Switch) *Switch { 390 // set metrics on sw1 391 if i == 0 { 392 opt := WithMetrics(p2pMetrics) 393 opt(sw) 394 } 395 return initSwitchFunc(i, sw) 396 }) 397 398 assert.Equal(t, len(sw1.Peers().List()), 1) 399 assert.EqualValues(t, 1, peersMetricValue()) 400 401 // send messages to the peer from sw1 402 p := sw1.Peers().List()[0] 403 p.Send(0x1, []byte("here's a message to send")) 404 405 // stop sw2. this should cause the p to fail, 406 // which results in calling StopPeerForError internally 407 sw2.Stop() 408 409 // now call StopPeerForError explicitly, eg. from a reactor 410 sw1.StopPeerForError(p, fmt.Errorf("some err")) 411 412 assert.Equal(t, len(sw1.Peers().List()), 0) 413 assert.EqualValues(t, 0, peersMetricValue()) 414 } 415 416 func TestSwitchReconnectsToOutboundPersistentPeer(t *testing.T) { 417 sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc) 418 err := sw.Start() 419 require.NoError(t, err) 420 defer sw.Stop() 421 422 // 1. 
simulate failure by closing connection 423 rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} 424 rp.Start() 425 defer rp.Stop() 426 427 err = sw.AddPersistentPeers([]string{rp.Addr().String()}) 428 require.NoError(t, err) 429 430 err = sw.DialPeerWithAddress(rp.Addr()) 431 require.Nil(t, err) 432 require.NotNil(t, sw.Peers().Get(rp.ID())) 433 434 p := sw.Peers().List()[0] 435 p.(*peer).CloseConn() 436 437 waitUntilSwitchHasAtLeastNPeers(sw, 1) 438 assert.False(t, p.IsRunning()) // old peer instance 439 assert.Equal(t, 1, sw.Peers().Size()) // new peer instance 440 441 // 2. simulate first time dial failure 442 rp = &remotePeer{ 443 PrivKey: ed25519.GenPrivKey(), 444 Config: cfg, 445 // Use different interface to prevent duplicate IP filter, this will break 446 // beyond two peers. 447 listenAddr: "127.0.0.1:0", 448 } 449 rp.Start() 450 defer rp.Stop() 451 452 conf := config.DefaultP2PConfig() 453 conf.TestDialFail = true // will trigger a reconnect 454 err = sw.addOutboundPeerWithConfig(rp.Addr(), conf) 455 require.NotNil(t, err) 456 // DialPeerWithAddres - sw.peerConfig resets the dialer 457 waitUntilSwitchHasAtLeastNPeers(sw, 2) 458 assert.Equal(t, 2, sw.Peers().Size()) 459 } 460 461 func TestSwitchReconnectsToInboundPersistentPeer(t *testing.T) { 462 sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc) 463 err := sw.Start() 464 require.NoError(t, err) 465 defer sw.Stop() 466 467 // 1. 
simulate failure by closing the connection 468 rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} 469 rp.Start() 470 defer rp.Stop() 471 472 err = sw.AddPersistentPeers([]string{rp.Addr().String()}) 473 require.NoError(t, err) 474 475 conn, err := rp.Dial(sw.NetAddress()) 476 require.NoError(t, err) 477 time.Sleep(50 * time.Millisecond) 478 require.NotNil(t, sw.Peers().Get(rp.ID())) 479 480 conn.Close() 481 482 waitUntilSwitchHasAtLeastNPeers(sw, 1) 483 assert.Equal(t, 1, sw.Peers().Size()) 484 } 485 486 func TestSwitchDialPeersAsync(t *testing.T) { 487 if testing.Short() { 488 return 489 } 490 491 sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc) 492 err := sw.Start() 493 require.NoError(t, err) 494 defer sw.Stop() 495 496 rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} 497 rp.Start() 498 defer rp.Stop() 499 500 err = sw.DialPeersAsync([]string{rp.Addr().String()}) 501 require.NoError(t, err) 502 time.Sleep(dialRandomizerIntervalMilliseconds * time.Millisecond) 503 require.NotNil(t, sw.Peers().Get(rp.ID())) 504 } 505 506 func waitUntilSwitchHasAtLeastNPeers(sw *Switch, n int) { 507 for i := 0; i < 20; i++ { 508 time.Sleep(250 * time.Millisecond) 509 has := sw.Peers().Size() 510 if has >= n { 511 break 512 } 513 } 514 } 515 516 func TestSwitchFullConnectivity(t *testing.T) { 517 switches := MakeConnectedSwitches(cfg, 3, initSwitchFunc, Connect2Switches) 518 defer func() { 519 for _, sw := range switches { 520 sw.Stop() 521 } 522 }() 523 524 for i, sw := range switches { 525 if sw.Peers().Size() != 2 { 526 t.Fatalf("Expected each switch to be connected to 2 other, but %d switch only connected to %d", sw.Peers().Size(), i) 527 } 528 } 529 } 530 531 func TestSwitchAcceptRoutine(t *testing.T) { 532 cfg.MaxNumInboundPeers = 5 533 534 // Create some unconditional peers. 
535 const unconditionalPeersNum = 2 536 var ( 537 unconditionalPeers = make([]*remotePeer, unconditionalPeersNum) 538 unconditionalPeerIDs = make([]string, unconditionalPeersNum) 539 ) 540 for i := 0; i < unconditionalPeersNum; i++ { 541 peer := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} 542 peer.Start() 543 unconditionalPeers[i] = peer 544 unconditionalPeerIDs[i] = string(peer.ID()) 545 } 546 547 // make switch 548 sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc) 549 sw.AddUnconditionalPeerIDs(unconditionalPeerIDs) 550 err := sw.Start() 551 require.NoError(t, err) 552 defer sw.Stop() 553 554 // 0. check there are no peers 555 assert.Equal(t, 0, sw.Peers().Size()) 556 557 // 1. check we connect up to MaxNumInboundPeers 558 peers := make([]*remotePeer, 0) 559 for i := 0; i < cfg.MaxNumInboundPeers; i++ { 560 peer := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} 561 peers = append(peers, peer) 562 peer.Start() 563 c, err := peer.Dial(sw.NetAddress()) 564 require.NoError(t, err) 565 // spawn a reading routine to prevent connection from closing 566 go func(c net.Conn) { 567 for { 568 one := make([]byte, 1) 569 _, err := c.Read(one) 570 if err != nil { 571 return 572 } 573 } 574 }(c) 575 } 576 time.Sleep(10 * time.Millisecond) 577 assert.Equal(t, cfg.MaxNumInboundPeers, sw.Peers().Size()) 578 579 // 2. check we close new connections if we already have MaxNumInboundPeers peers 580 peer := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} 581 peer.Start() 582 conn, err := peer.Dial(sw.NetAddress()) 583 require.NoError(t, err) 584 // check conn is closed 585 one := make([]byte, 1) 586 conn.SetReadDeadline(time.Now().Add(10 * time.Millisecond)) 587 _, err = conn.Read(one) 588 assert.Equal(t, io.EOF, err) 589 assert.Equal(t, cfg.MaxNumInboundPeers, sw.Peers().Size()) 590 peer.Stop() 591 592 // 3. check we connect to unconditional peers despite the limit. 
593 for _, peer := range unconditionalPeers { 594 c, err := peer.Dial(sw.NetAddress()) 595 require.NoError(t, err) 596 // spawn a reading routine to prevent connection from closing 597 go func(c net.Conn) { 598 for { 599 one := make([]byte, 1) 600 _, err := c.Read(one) 601 if err != nil { 602 return 603 } 604 } 605 }(c) 606 } 607 time.Sleep(10 * time.Millisecond) 608 assert.Equal(t, cfg.MaxNumInboundPeers+unconditionalPeersNum, sw.Peers().Size()) 609 610 for _, peer := range peers { 611 peer.Stop() 612 } 613 for _, peer := range unconditionalPeers { 614 peer.Stop() 615 } 616 } 617 618 type errorTransport struct { 619 acceptErr error 620 } 621 622 func (et errorTransport) NetAddress() NetAddress { 623 panic("not implemented") 624 } 625 626 func (et errorTransport) Accept(c peerConfig) (Peer, error) { 627 return nil, et.acceptErr 628 } 629 func (errorTransport) Dial(NetAddress, peerConfig) (Peer, error) { 630 panic("not implemented") 631 } 632 func (errorTransport) Cleanup(Peer) { 633 panic("not implemented") 634 } 635 636 func TestSwitchAcceptRoutineErrorCases(t *testing.T) { 637 sw := NewSwitch(cfg, errorTransport{ErrFilterTimeout{}}) 638 assert.NotPanics(t, func() { 639 err := sw.Start() 640 assert.NoError(t, err) 641 sw.Stop() 642 }) 643 644 sw = NewSwitch(cfg, errorTransport{ErrRejected{conn: nil, err: errors.New("filtered"), isFiltered: true}}) 645 assert.NotPanics(t, func() { 646 err := sw.Start() 647 assert.NoError(t, err) 648 sw.Stop() 649 }) 650 // TODO(melekes) check we remove our address from addrBook 651 652 sw = NewSwitch(cfg, errorTransport{ErrTransportClosed{}}) 653 assert.NotPanics(t, func() { 654 err := sw.Start() 655 assert.NoError(t, err) 656 sw.Stop() 657 }) 658 } 659 660 // mockReactor checks that InitPeer never called before RemovePeer. If that's 661 // not true, InitCalledBeforeRemoveFinished will return true. 
type mockReactor struct {
	*BaseReactor

	// atomic
	removePeerInProgress           uint32 // 1 while RemovePeer is executing
	initCalledBeforeRemoveFinished uint32 // set to 1 if InitPeer overlaps RemovePeer
}

// RemovePeer marks removal as in progress, sleeps to widen the race window,
// then clears the flag. InitPeer must not run while the flag is set.
func (r *mockReactor) RemovePeer(peer Peer, reason interface{}) {
	atomic.StoreUint32(&r.removePeerInProgress, 1)
	defer atomic.StoreUint32(&r.removePeerInProgress, 0)
	time.Sleep(100 * time.Millisecond)
}

// InitPeer records a violation if it is invoked while RemovePeer is still
// running, then returns the peer unchanged.
func (r *mockReactor) InitPeer(peer Peer) Peer {
	if atomic.LoadUint32(&r.removePeerInProgress) == 1 {
		atomic.StoreUint32(&r.initCalledBeforeRemoveFinished, 1)
	}

	return peer
}

// InitCalledBeforeRemoveFinished reports whether InitPeer was ever observed
// running concurrently with RemovePeer.
func (r *mockReactor) InitCalledBeforeRemoveFinished() bool {
	return atomic.LoadUint32(&r.initCalledBeforeRemoveFinished) == 1
}

// see stopAndRemovePeer
func TestSwitchInitPeerIsNotCalledBeforeRemovePeer(t *testing.T) {
	// make reactor
	reactor := &mockReactor{}
	reactor.BaseReactor = NewBaseReactor("mockReactor", reactor)

	// make switch
	sw := MakeSwitch(cfg, 1, "testing", "123.123.123", func(i int, sw *Switch) *Switch {
		sw.AddReactor("mock", reactor)
		return sw
	})
	err := sw.Start()
	require.NoError(t, err)
	defer sw.Stop()

	// add peer
	rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg}
	rp.Start()
	defer rp.Stop()
	_, err = rp.Dial(sw.NetAddress())
	require.NoError(t, err)
	// wait till the switch adds rp to the peer set
	time.Sleep(50 * time.Millisecond)

	// stop peer asynchronously
	go sw.StopPeerForError(sw.Peers().Get(rp.ID()), "test")

	// simulate peer reconnecting to us
	_, err = rp.Dial(sw.NetAddress())
	require.NoError(t, err)
	// wait till the switch adds rp to the peer set
	time.Sleep(50 * time.Millisecond)

	// make sure reactor.RemovePeer is finished before InitPeer is called
	assert.False(t, reactor.InitCalledBeforeRemoveFinished())
}

func BenchmarkSwitchBroadcast(b *testing.B) {
	s1, s2 := MakeSwitchPair(b, func(i int, sw *Switch) *Switch {
		// Make two non-logging reactors of two channels each
		sw.AddReactor("foo", NewTestReactor([]*conn.ChannelDescriptor{
			{ID: byte(0x00), Priority: 10},
			{ID: byte(0x01), Priority: 10},
		}, false))
		sw.AddReactor("bar", NewTestReactor([]*conn.ChannelDescriptor{
			{ID: byte(0x02), Priority: 10},
			{ID: byte(0x03), Priority: 10},
		}, false))
		return sw
	})
	defer s1.Stop()
	defer s2.Stop()

	// Allow time for goroutines to boot up
	time.Sleep(1 * time.Second)

	b.ResetTimer()

	numSuccess, numFailure := 0, 0

	// Broadcast a message on a rotating channel and tally per-peer outcomes
	for i := 0; i < b.N; i++ {
		chID := byte(i % 4)
		successChan := s1.Broadcast(chID, []byte("test data"))
		for s := range successChan {
			if s {
				numSuccess++
			} else {
				numFailure++
			}
		}
	}

	b.Logf("success: %v, failure: %v", numSuccess, numFailure)
}

// addrBookMock is an in-memory AddrBook implementation backed by two string
// sets: known addresses and our own addresses.
type addrBookMock struct {
	addrs    map[string]struct{}
	ourAddrs map[string]struct{}
}

// Compile-time check that addrBookMock satisfies the AddrBook interface.
var _ AddrBook = (*addrBookMock)(nil)

// AddAddress records addr as known; the src argument is ignored.
func (book *addrBookMock) AddAddress(addr *NetAddress, src *NetAddress) error {
	book.addrs[addr.String()] = struct{}{}
	return nil
}
func (book *addrBookMock) AddOurAddress(addr *NetAddress) { book.ourAddrs[addr.String()] = struct{}{} }
func (book *addrBookMock) OurAddress(addr *NetAddress) bool {
	_, ok := book.ourAddrs[addr.String()]
	return ok
}
func (book *addrBookMock) MarkGood(ID) {}
func (book *addrBookMock) HasAddress(addr *NetAddress) bool {
	_, ok := book.addrs[addr.String()]
	return ok
}
func (book *addrBookMock) RemoveAddress(addr *NetAddress) {
	delete(book.addrs, addr.String())
}
func (book *addrBookMock) Save() {}