github.com/aergoio/aergo@v1.3.1/p2p/peermanager_test.go

package p2p

import (
	"fmt"
	"strconv"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/aergoio/aergo-lib/log"
	cfg "github.com/aergoio/aergo/config"
	"github.com/aergoio/aergo/message"
	"github.com/aergoio/aergo/p2p/p2pcommon"
	"github.com/aergoio/aergo/p2p/p2pkey"
	"github.com/aergoio/aergo/p2p/p2pmock"
	"github.com/aergoio/aergo/p2p/p2putil"
	"github.com/aergoio/aergo/types"
	"github.com/golang/mock/gomock"
	"github.com/libp2p/go-libp2p-core/crypto"
	"github.com/libp2p/go-libp2p-core/network"
	"github.com/pkg/errors"
	"github.com/stretchr/testify/assert"
)

// FailTestGetPeers is not picked up by `go test` since its name does not
// begin with Test; it accesses target.remotePeers from two goroutines
// without locking, so it fails under the race detector.
func FailTestGetPeers(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	mockActor := p2pmock.NewMockActorService(ctrl)
	dummyBlock := types.Block{Hash: dummyBlockHash, Header: &types.BlockHeader{BlockNo: dummyBlockHeight}}
	mockActor.EXPECT().CallRequest(gomock.Any(), gomock.Any(), gomock.Any()).
		Return(message.GetBlockRsp{Block: &dummyBlock}, nil)
	target := NewPeerManager(nil, nil, mockActor, nil, nil, nil, nil, log.NewLogger("test.p2p"), cfg.NewServerContext("", "").GetDefaultConfig().(*cfg.Config), false).(*peerManager)

	iterSize := 500
	wg := sync.WaitGroup{}
	waitChan := make(chan int)
	wg.Add(1)
	go func() {
		for i := 0; i < iterSize; i++ {
			peerID := types.PeerID(strconv.Itoa(i))
			peerMeta := p2pcommon.PeerMeta{ID: peerID}
			target.remotePeers[peerID] = newRemotePeer(peerMeta, 0, target, mockActor, logger, nil, nil, nil)
			if i == (iterSize >> 2) {
				wg.Done()
			}
		}
	}()

	go func() {
		wg.Wait()
		for key, val := range target.remotePeers {
			fmt.Printf("%s is %s\n", key.String(), val.State().String())
		}
		waitChan <- 0
	}()

	<-waitChan
}

func TestPeerManager_GetPeers(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	mockActorServ := p2pmock.NewMockActorService(ctrl)

	tLogger := log.NewLogger("test.p2p")
	tConfig := cfg.NewServerContext("", "").GetDefaultConfig().(*cfg.Config)
	p2pkey.InitNodeInfo(&tConfig.BaseConfig, tConfig.P2P, "1.0.0-test", tLogger)
	target := NewPeerManager(nil, nil, mockActorServ, nil, nil, nil, nil, tLogger, tConfig, false).(*peerManager)

	iterSize := 500
	wg := &sync.WaitGroup{}
	wgAll := &sync.WaitGroup{}
	waitChan := make(chan int)
	wg.Add(1)
	wgAll.Add(1)
	go func() {
		for i := 0; i < iterSize; i++ {
			peerID := types.PeerID(strconv.Itoa(i))
			peerMeta := p2pcommon.PeerMeta{ID: peerID}
			target.insertPeer(peerID, newRemotePeer(peerMeta, 0, target, mockActorServ, logger, nil, nil, nil))
			if i == (iterSize >> 2) {
				wg.Done()
			}
		}
		wgAll.Done()
	}()

	cnt := 0
	go func() {
		wg.Wait()
		for range target.GetPeers() {
			cnt++
		}
		assert.True(t, cnt > (iterSize >> 2))
		waitChan <- 0
	}()

	<-waitChan

	wgAll.Wait()
	assert.True(t, iterSize == len(target.GetPeers()))
}
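
// TestPeerManager_GetPeerAddresses fills the manager with a mix of hidden
// and plain peers and checks the listing returned by GetPeerAddresses; see
// the TODO in the body about the currently unused table columns.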
func TestPeerManager_GetPeerAddresses(t *testing.T) {
	peersLen := 6
	hiddenCnt := 3
	samplePeers := make([]*remotePeerImpl, peersLen)
	for i := 0; i < peersLen; i++ {
		pkey, _, _ := crypto.GenerateKeyPair(crypto.Secp256k1, 256)
		pid, _ := types.IDFromPrivateKey(pkey)
		samplePeers[i] = &remotePeerImpl{meta: p2pcommon.PeerMeta{ID: pid, Hidden: i < hiddenCnt}, lastStatus: &types.LastBlockStatus{}}
	}

	tests := []struct {
		name string

		hidden   bool
		showSelf bool

		wantCnt int
	}{
		{"TDefault", false, false, peersLen},
		{"TWSelf", false, true, peersLen + 1},
		{"TWOHidden", true, false, peersLen - hiddenCnt},
		{"TWOHiddenWSelf", true, true, peersLen - hiddenCnt + 1},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			pm := &peerManager{
				remotePeers: make(map[types.PeerID]p2pcommon.RemotePeer),
				mutex:       &sync.Mutex{},
			}
			for _, peer := range samplePeers {
				pm.remotePeers[peer.ID()] = peer
			}
			pm.updatePeerCache()

			// TODO: the hidden, showSelf and wantCnt columns above are not
			// exercised yet; every case currently checks the default listing.
			actPeers := pm.GetPeerAddresses(false, false)
			assert.Equal(t, peersLen, len(actPeers))
		})
	}
}

func TestPeerManager_init(t *testing.T) {
	tConfig := cfg.NewServerContext("", "").GetDefaultConfig().(*cfg.Config)
	defaultCfg := tConfig.P2P
	p2pkey.InitNodeInfo(&tConfig.BaseConfig, defaultCfg, "1.0.0-test", logger)
	localIP, _ := p2putil.ExternalIP()

	tests := []struct {
		name            string
		inCfg           *cfg.P2PConfig
		expectProtoAddr string
		expectProtoPort uint32
		expectBindAddr  string
		expectBindPort  uint32
		expectPanic     bool
	}{
		{"TDefault", defaultCfg, localIP.String(), uint32(defaultCfg.NetProtocolPort), localIP.String(), uint32(defaultCfg.NetProtocolPort), false},
		// unspecified ProtocolAddress 0.0.0.0 is not allowed
		{"TUnspecifiedAddr", &cfg.P2PConfig{NetProtocolAddr: "0.0.0.0", NetProtocolPort: 7846}, localIP.String(), 7846, localIP.String(), uint32(defaultCfg.NetProtocolPort), true},
		// malformed ProtocolAddress
		{"TWrongAddr", &cfg.P2PConfig{NetProtocolAddr: "24558.30.0.0", NetProtocolPort: 7846}, localIP.String(), 7846, localIP.String(), 7846, true},
		// bind to all addresses
		{"TBindAll", &cfg.P2PConfig{NetProtocolAddr: "", NetProtocolPort: 7846, NPBindAddr: "0.0.0.0"}, localIP.String(), 7846, "0.0.0.0", 7846, false},
		// bind to a different address
		{"TBindDifferAddr", &cfg.P2PConfig{NetProtocolAddr: "", NetProtocolPort: 7846, NPBindAddr: "172.21.1.2"}, localIP.String(), 7846, "172.21.1.2", 7846, false},
		// bind to a different port
		{"TDifferPort", &cfg.P2PConfig{NetProtocolAddr: "", NetProtocolPort: 7846, NPBindPort: 12345}, localIP.String(), 7846, localIP.String(), 12345, false},
		// bind to a different address and port
		{"TBindDiffer", &cfg.P2PConfig{NetProtocolAddr: "", NetProtocolPort: 7846, NPBindAddr: "172.21.1.2", NPBindPort: 12345}, localIP.String(), 7846, "172.21.1.2", 12345, false},
		// TODO: test cases
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			if test.expectPanic {
				defer func() {
					if r := recover(); r != nil {
						fmt.Println(test.name, " expected panic occurred ", r)
					} else {
						t.Errorf("expected a panic, but none occurred")
					}
				}()
				pm := peerManager{conf: test.inCfg}

				pm.init()
			}
			// TODO: cases with expectPanic == false do not run init() yet,
			// and the expect* columns above are never verified.
		})
	}
}
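
// A sketch of how the expect* columns above could be checked, assuming
// init() records the resolved addresses in fields such as selfMeta and
// bindAddress/bindPort (hypothetical names; the real fields of peerManager
// may differ):
//
//	pm := peerManager{conf: test.inCfg}
//	pm.init()
//	if pm.selfMeta.IPAddress != test.expectProtoAddr || pm.selfMeta.Port != test.expectProtoPort {
//		t.Errorf("proto addr %v:%v, want %v:%v", pm.selfMeta.IPAddress, pm.selfMeta.Port,
//			test.expectProtoAddr, test.expectProtoPort)
//	}
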
func Test_peerManager_runManagePeers_MultiConnWorks(t *testing.T) {
	// Test that the manager works correctly when multiple concurrent
	// connections are handshaked at the same time.
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	logger := log.NewLogger("p2p.test")
	type desc struct {
		pid      types.PeerID
		outbound bool
		hsTime   time.Duration
	}
	ds := make([]desc, 10)
	for i := 0; i < 10; i++ {
		pkey, _, _ := crypto.GenerateKeyPair(crypto.Secp256k1, 256)
		pid, _ := types.IDFromPrivateKey(pkey)
		ds[i] = desc{hsTime: time.Millisecond * 10, outbound: true, pid: pid}
	}
	tests := []struct {
		name string

		conns []desc
	}{
		{"T10", ds},
		// TODO: Add test cases.
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			mockPeerFinder := p2pmock.NewMockPeerFinder(ctrl)
			mockWPManager := p2pmock.NewMockWaitingPeerManager(ctrl)
			mockWPManager.EXPECT().CheckAndConnect().AnyTimes()
			mockNT := p2pmock.NewMockNetworkTransport(ctrl)
			mockNT.EXPECT().AddStreamHandler(gomock.Any(), gomock.Any()).AnyTimes()
			mockNT.EXPECT().RemoveStreamHandler(gomock.Any()).AnyTimes()

			dummyCfg := &cfg.P2PConfig{}
			pm := &peerManager{
				peerFinder:   mockPeerFinder,
				wpManager:    mockWPManager,
				remotePeers:  make(map[types.PeerID]p2pcommon.RemotePeer, 10),
				waitingPeers: make(map[types.PeerID]*p2pcommon.WaitingPeer, 10),
				conf:         dummyCfg,
				nt:           mockNT,

				getPeerChannel:    make(chan getPeerTask),
				peerHandshaked:    make(chan handshakeResult),
				removePeerChannel: make(chan p2pcommon.RemotePeer),
				fillPoolChannel:   make(chan []p2pcommon.PeerMeta, 2),
				inboundConnChan:   make(chan inboundConnEvent),
				workDoneChannel:   make(chan p2pcommon.ConnWorkResult),
				eventListeners:    make([]p2pcommon.PeerEventListener, 0, 4),
				finishChannel:     make(chan struct{}),

				logger: logger,
			}

			go pm.runManagePeers()

			workWG := sync.WaitGroup{}
			workWG.Add(len(tt.conns))
			latch := sync.WaitGroup{}
			latch.Add(len(tt.conns))
			finCnt := uint32(0)
			// Register the expectation before any worker can send a result;
			// otherwise a fast worker could race against the mock setup.
			mockWPManager.EXPECT().OnWorkDone(gomock.AssignableToTypeOf(p2pcommon.ConnWorkResult{})).Do(
				func(wr p2pcommon.ConnWorkResult) {
					atomic.AddUint32(&finCnt, 1)
					workWG.Done()
				}).AnyTimes()
			for i, conn := range tt.conns {
				meta := p2pcommon.PeerMeta{ID: conn.pid, Outbound: conn.outbound}
				wr := p2pcommon.ConnWorkResult{Meta: meta, Result: nil, Inbound: !conn.outbound, Seq: uint32(i)}
				go func(conn desc, result p2pcommon.ConnWorkResult) {
					latch.Done()
					latch.Wait()
					//fmt.Printf("work start %s #%d", p2putil.ShortForm(result.Meta.ID), result.Seq)
					//time.Sleep(conn.hsTime)
					fmt.Printf("work done %s #%d\n", p2putil.ShortForm(result.Meta.ID), result.Seq)
					pm.workDoneChannel <- result
				}(conn, wr)
			}

			workWG.Wait()
			pm.Stop()

			if atomic.LoadUint32(&finCnt) != uint32(len(tt.conns)) {
				t.Errorf("finished count %v, want %v", finCnt, len(tt.conns))
			}
		})
	}
}
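
// Test_peerManager_Stop verifies the status transition made by Stop() from
// each starting state, and that the finish signal is sent only when the
// manager was actually running.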
func Test_peerManager_Stop(t *testing.T) {
	// check if Stop is working.
	tests := []struct {
		name string

		prevStatus int32

		wantStatus      int32
		wantSentChannel bool
	}{
		// never send to the finish channel twice.
		{"TInitial", initial, stopping, false},
		{"TRunning", running, stopping, true},
		{"TStopping", stopping, stopping, false},
		{"TStopped", stopped, stopped, false},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			pm := &peerManager{
				logger:        logger,
				finishChannel: make(chan struct{}, 1),
			}

			atomic.StoreInt32(&pm.status, tt.prevStatus)
			pm.Stop()

			if atomic.LoadInt32(&pm.status) != tt.wantStatus {
				t.Errorf("manager status %v, want %v", toMStatusName(atomic.LoadInt32(&pm.status)),
					toMStatusName(tt.wantStatus))
			}
			var sent bool
			timeout := time.NewTimer(time.Millisecond << 6)
			select {
			case <-pm.finishChannel:
				sent = true
			case <-timeout.C:
				sent = false
			}
			if sent != tt.wantSentChannel {
				t.Errorf("signal sent %v, want %v", sent, tt.wantSentChannel)
			}
		})
	}
}

// Test_peerManager_StopInRun tests the idempotency of the Stop method while
// the manager is running.
func Test_peerManager_StopInRun(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	// check if Stop is working.
	tests := []struct {
		name string

		callCnt    int
		wantStatus int32
	}{
		{"TStopOnce", 1, stopped},
		{"TStopTwice", 2, stopped},
		{"TInStopping", 3, stopped},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			mockNT := p2pmock.NewMockNetworkTransport(ctrl)
			mockNT.EXPECT().AddStreamHandler(gomock.Any(), gomock.Any()).AnyTimes()
			mockNT.EXPECT().RemoveStreamHandler(gomock.Any()).AnyTimes()

			mockPeerFinder := p2pmock.NewMockPeerFinder(ctrl)
			mockWPManager := p2pmock.NewMockWaitingPeerManager(ctrl)

			pm := &peerManager{
				logger:     logger,
				nt:         mockNT,
				peerFinder: mockPeerFinder,
				wpManager:  mockWPManager,

				mutex:         &sync.Mutex{},
				finishChannel: make(chan struct{}),
			}
			go pm.runManagePeers()
			// wait until the status of pm has changed to running
			for atomic.LoadInt32(&pm.status) != running {
				time.Sleep(time.Millisecond)
			}
			// stopping should finish within one second in the normal case
			checkTimer := time.NewTimer(time.Second >> 3)
			for i := 0; i < tt.callCnt; i++ {
				pm.Stop()
				time.Sleep(time.Millisecond << 6)
			}
			succ := false
			failedTimeout := time.NewTimer(time.Second * 5)

			// check if the status has changed
		VERIFYLOOP:
			for {
				select {
				case <-checkTimer.C:
					if atomic.LoadInt32(&pm.status) == tt.wantStatus {
						succ = true
						break VERIFYLOOP
					} else {
						checkTimer.Stop()
						checkTimer.Reset(time.Second)
					}
				case <-failedTimeout.C:
					break VERIFYLOOP
				}
			}
			if !succ {
				t.Errorf("manager status %v, want %v within %v", toMStatusName(atomic.LoadInt32(&pm.status)),
					toMStatusName(tt.wantStatus), time.Second*5)
			}
		})
	}
}

// toMStatusName converts a peerManager status value to a human-readable name
// for error messages.
func toMStatusName(status int32) string {
	switch status {
	case initial:
		return "initial"
	case running:
		return "running"
	case stopping:
		return "stopping"
	case stopped:
		return "stopped"
	default:
		return "(invalid)" + strconv.Itoa(int(status))
	}
}
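
// Test_toMStatusName is a small added sanity check for the helper above; it
// relies only on the status constants already used in this file.
func Test_toMStatusName(t *testing.T) {
	cases := []struct {
		status int32
		want   string
	}{
		{initial, "initial"},
		{running, "running"},
		{stopping, "stopping"},
		{stopped, "stopped"},
	}
	for _, c := range cases {
		if got := toMStatusName(c.status); got != c.want {
			t.Errorf("toMStatusName(%v) = %v, want %v", c.status, got, c.want)
		}
	}
}
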
func Test_peerManager_tryRegister(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	// id0 is in both the designated peer set and the hidden peer set
	desigIDs := make([]types.PeerID, 3)
	desigPeers := make(map[types.PeerID]p2pcommon.PeerMeta, 3)

	hiddenIDs := make([]types.PeerID, 3)
	hiddenPeers := make(map[types.PeerID]bool)

	for i := 0; i < 3; i++ {
		pkey, _, _ := crypto.GenerateKeyPair(crypto.Secp256k1, 256)
		pid, _ := types.IDFromPrivateKey(pkey)
		desigIDs[i] = pid
		desigPeers[pid] = p2pcommon.PeerMeta{ID: pid}
	}
	hiddenIDs[0] = desigIDs[0]
	hiddenPeers[desigIDs[0]] = true

	for i := 1; i < 3; i++ {
		pkey, _, _ := crypto.GenerateKeyPair(crypto.Secp256k1, 256)
		pid, _ := types.IDFromPrivateKey(pkey)
		hiddenIDs[i] = pid
		hiddenPeers[pid] = true
	}

	type args struct {
		outbound bool
		status   *types.Status
	}
	tests := []struct {
		name string
		args args

		wantSucc   bool
		wantDesign bool
		wantHidden bool
	}{
		// add inbound peer
		{"TIn", args{false,
			dummyStatus(dummyPeerID, false)}, true, false, false},
		// add inbound designated peer
		{"TInDesignated", args{false,
			dummyStatus(desigIDs[1], false)}, true, true, false},
		// add inbound hidden peer
		{"TInHidden", args{false,
			dummyStatus(dummyPeerID, true)}, true, false, true},
		// add inbound peer (hidden in node config)
		{"TInHiddenInConf", args{false,
			dummyStatus(hiddenIDs[1], false)}, true, false, true},
		{"TInH&D", args{false,
			dummyStatus(hiddenIDs[0], true)}, true, true, true},

		// add outbound peer
		{"TOut", args{true,
			dummyStatus(dummyPeerID, false)}, true, false, false},
		// add outbound designated peer
		{"TOutDesignated", args{true,
			dummyStatus(desigIDs[1], false)}, true, true, false},
		// add outbound hidden peer
		{"TOutHidden", args{true,
			dummyStatus(dummyPeerID, true)}, true, false, true},
		// add outbound peer (hidden in node config)
		{"TOutHiddenInConf", args{true,
			dummyStatus(hiddenIDs[1], false)}, true, false, true},
		{"TOutH&D", args{true,
			dummyStatus(hiddenIDs[0], true)}, true, true, true},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			mockStream := p2pmock.NewMockStream(ctrl)
			mockStream.EXPECT().Close().AnyTimes()
			mockRW := p2pmock.NewMockMsgReadWriter(ctrl)
			mockRW.EXPECT().ReadMsg().DoAndReturn(func() (interface{}, error) {
				time.Sleep(time.Millisecond * 10)
				return nil, errors.New("close")
			}).AnyTimes()
			mockPeerFactory := p2pmock.NewMockPeerFactory(ctrl)
			mockPeer := p2pmock.NewMockRemotePeer(ctrl)

			in := handshakeResult{meta: p2pcommon.NewMetaFromStatus(tt.args.status, tt.args.outbound), status: tt.args.status, msgRW: mockRW, s: mockStream}
			var gotMeta p2pcommon.PeerMeta

			mockPeerFactory.EXPECT().CreateRemotePeer(gomock.AssignableToTypeOf(p2pcommon.PeerMeta{}), gomock.Any(), in.status, mockStream, mockRW).Do(func(meta p2pcommon.PeerMeta, seq uint32, status *types.Status, stream network.Stream, rw p2pcommon.MsgReadWriter) {
				gotMeta = meta
			}).Return(mockPeer)
			mockPeer.EXPECT().RunPeer().MaxTimes(1)
			mockPeer.EXPECT().Role().Return(p2pcommon.BlockProducer).AnyTimes()
			mockPeer.EXPECT().Name().Return("testPeer").AnyTimes()

			// in case of handshake error
			mockMF := p2pmock.NewMockMoFactory(ctrl)
			mockMF.EXPECT().NewMsgRequestOrder(false, p2pcommon.GoAway, gomock.Any()).Return(&pbRequestOrder{}).MaxTimes(1)
			mockRW.EXPECT().WriteMsg(gomock.Any()).MaxTimes(1)

			pm := &peerManager{
				peerFactory:     mockPeerFactory,
				designatedPeers: desigPeers,
				hiddenPeerSet:   hiddenPeers,
				logger:          logger,
				mutex:           &sync.Mutex{},
				remotePeers:     make(map[types.PeerID]p2pcommon.RemotePeer, 100),
				peerHandshaked:  make(chan handshakeResult, 10),
			}

			r := pm.tryRegister(in)
			if (r != nil) != tt.wantSucc {
				t.Errorf("peerManager.tryRegister() succ = %v, want %v", r != nil, tt.wantSucc)
			}
			if tt.wantSucc {
				got := gotMeta
				if got.Designated != tt.wantDesign {
					t.Errorf("peerManager.tryRegister() got Designated = %v, want %v", got.Designated, tt.wantDesign)
				}
				if got.Hidden != tt.wantHidden {
					t.Errorf("peerManager.tryRegister() got Hidden = %v, want %v", got.Hidden, tt.wantHidden)
				}
			}
		})
	}
}
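
// Test_peerManager_tryRegisterCollision checks the tie-break applied when an
// inbound and an outbound connection to the same peer collide: which side
// survives is decided by comparing the two peer IDs.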
func Test_peerManager_tryRegisterCollision(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	selfID := p2pkey.NodeID()
	inboundWillLive := p2putil.ComparePeerID(selfID, dummyPeerID) <= 0
	type args struct {
		outbound bool
		status   *types.Status
	}
	tests := []struct {
		name string
		args args

		wantSucc bool
	}{
		// whether the inbound or the outbound connection survives depends on
		// how the self peer ID of this test compares to dummyPeerID.
		{"TIn", args{false,
			dummyStatus(dummyPeerID, false)}, inboundWillLive},
		{"TOut", args{true,
			dummyStatus(dummyPeerID, false)}, !inboundWillLive},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			mockIS := p2pmock.NewMockInternalService(ctrl)
			mockStream := p2pmock.NewMockStream(ctrl)
			mockStream.EXPECT().Close().AnyTimes()
			mockRW := p2pmock.NewMockMsgReadWriter(ctrl)
			mockRW.EXPECT().ReadMsg().DoAndReturn(func() (interface{}, error) {
				time.Sleep(time.Millisecond * 10)
				return nil, errors.New("close")
			}).AnyTimes()

			in := handshakeResult{meta: p2pcommon.NewMetaFromStatus(tt.args.status, tt.args.outbound), status: tt.args.status, msgRW: mockRW, s: mockStream}
			mockIS.EXPECT().SelfNodeID().Return(selfID).MinTimes(1)
			mockPeerFactory := p2pmock.NewMockPeerFactory(ctrl)
			mockPeer := p2pmock.NewMockRemotePeer(ctrl)
			mockPeer.EXPECT().RunPeer().MaxTimes(1)
			mockPeer.EXPECT().Role().Return(p2pcommon.BlockProducer).AnyTimes()
			mockPeer.EXPECT().Name().Return("testPeer").AnyTimes()
			if tt.wantSucc {
				mockPeer.EXPECT().Stop().MaxTimes(1)
				mockPeerFactory.EXPECT().CreateRemotePeer(gomock.AssignableToTypeOf(p2pcommon.PeerMeta{}), gomock.Any(), in.status, mockStream, mockRW).Return(mockPeer)
			}

			// in case of handshake error
			mockRW.EXPECT().WriteMsg(gomock.Any()).MaxTimes(1)

			pm := &peerManager{
				is:              mockIS,
				peerFactory:     mockPeerFactory,
				designatedPeers: make(map[types.PeerID]p2pcommon.PeerMeta),
				hiddenPeerSet:   make(map[types.PeerID]bool),
				logger:          logger,
				mutex:           &sync.Mutex{},
				remotePeers:     make(map[types.PeerID]p2pcommon.RemotePeer, 100),
				peerHandshaked:  make(chan handshakeResult, 10),
			}
			pm.remotePeers[dummyPeerID] = mockPeer

			r := pm.tryRegister(in)
			if (r != nil) != tt.wantSucc {
				t.Errorf("peerManager.tryRegister() succ = %v, want %v", r != nil, tt.wantSucc)
			}
		})
	}
}