// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.

package peer

import (
	"context"
	"crypto"
	"net"
	"net/netip"
	"testing"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/stretchr/testify/require"

	"github.com/ava-labs/avalanchego/ids"
	"github.com/ava-labs/avalanchego/message"
	"github.com/ava-labs/avalanchego/network/throttling"
	"github.com/ava-labs/avalanchego/proto/pb/p2p"
	"github.com/ava-labs/avalanchego/snow/networking/router"
	"github.com/ava-labs/avalanchego/snow/networking/tracker"
	"github.com/ava-labs/avalanchego/snow/uptime"
	"github.com/ava-labs/avalanchego/snow/validators"
	"github.com/ava-labs/avalanchego/staking"
	"github.com/ava-labs/avalanchego/upgrade"
	"github.com/ava-labs/avalanchego/utils"
	"github.com/ava-labs/avalanchego/utils/constants"
	"github.com/ava-labs/avalanchego/utils/crypto/bls"
	"github.com/ava-labs/avalanchego/utils/logging"
	"github.com/ava-labs/avalanchego/utils/math/meter"
	"github.com/ava-labs/avalanchego/utils/resource"
	"github.com/ava-labs/avalanchego/utils/set"
	"github.com/ava-labs/avalanchego/version"
)

// testPeer wraps a started Peer together with the channel that this side's
// Router forwards inbound messages onto, so tests can observe what the peer
// received.
type testPeer struct {
	Peer
	inboundMsgChan <-chan message.InboundMessage
}

// rawTestPeer bundles everything needed to start one side of a peer
// connection: the peer's Config (with MyNodeID, IPSigner, and Router already
// populated), its staking certificate, and the channel its Router writes
// inbound messages to.
type rawTestPeer struct {
	config         *Config
	cert           *staking.Certificate
	inboundMsgChan <-chan message.InboundMessage
}

// newMessageCreator returns a message.Creator configured with the default
// network compression type and a 10s max message timeout.
func newMessageCreator(t *testing.T) message.Creator {
	t.Helper()

	mc, err := message.NewCreator(
		logging.NoLog{},
		prometheus.NewRegistry(),
		constants.DefaultNetworkCompressionType,
		10*time.Second,
	)
	require.NoError(t, err)

	return mc
}

// newConfig builds a peer Config with no-op throttling, uptime, and logging
// components. Router, MySubnets, and IPSigner are left nil/empty; they are
// filled in per-peer by newRawTestPeer.
func newConfig(t *testing.T) Config {
	t.Helper()
	require := require.New(t)

	metrics, err := NewMetrics(prometheus.NewRegistry())
	require.NoError(err)

	resourceTracker, err := tracker.NewResourceTracker(
		prometheus.NewRegistry(),
		resource.NoUsage,
		meter.ContinuousFactory{},
		10*time.Second,
	)
	require.NoError(err)

	return Config{
		ReadBufferSize:       constants.DefaultNetworkPeerReadBufferSize,
		WriteBufferSize:      constants.DefaultNetworkPeerWriteBufferSize,
		Metrics:              metrics,
		MessageCreator:       newMessageCreator(t),
		Log:                  logging.NoLog{},
		InboundMsgThrottler:  throttling.NewNoInboundThrottler(),
		Network:              TestNetwork,
		Router:               nil,
		VersionCompatibility: version.GetCompatibility(upgrade.InitiallyActiveTime),
		MySubnets:            nil,
		Beacons:              validators.NewManager(),
		Validators:           validators.NewManager(),
		NetworkID:            constants.LocalID,
		PingFrequency:        constants.DefaultPingFrequency,
		PongTimeout:          constants.DefaultPingPongTimeout,
		MaxClockDifference:   time.Minute,
		ResourceTracker:      resourceTracker,
		UptimeCalculator:     uptime.NoOpCalculator,
		IPSigner:             nil,
	}
}

// newRawTestPeer generates a fresh TLS identity and BLS key for a peer, and
// completes the given Config with the derived node ID, an IPSigner, and a
// Router that forwards every inbound message onto the returned channel.
// The Config is copied, so one shared Config may seed multiple peers.
func newRawTestPeer(t *testing.T, config Config) *rawTestPeer {
	t.Helper()
	require := require.New(t)

	tlsCert, err := staking.NewTLSCert()
	require.NoError(err)
	cert, err := staking.ParseCertificate(tlsCert.Leaf.Raw)
	require.NoError(err)
	config.MyNodeID = ids.NodeIDFromCert(cert)

	ip := utils.NewAtomic(netip.AddrPortFrom(
		netip.IPv6Loopback(),
		1,
	))
	tls := tlsCert.PrivateKey.(crypto.Signer)
	bls, err := bls.NewSecretKey()
	require.NoError(err)

	config.IPSigner = NewIPSigner(ip, tls, bls)

	// Unbuffered: the Router blocks until the test reads the message.
	inboundMsgChan := make(chan message.InboundMessage)
	config.Router = router.InboundHandlerFunc(func(_ context.Context, msg message.InboundMessage) {
		inboundMsgChan <- msg
	})

	return &rawTestPeer{
		config:         &config,
		cert:           cert,
		inboundMsgChan: inboundMsgChan,
	}
}

// startTestPeer starts `self`'s side of a connection to `peer` over conn and
// returns it wrapped with self's inbound message channel.
func startTestPeer(self *rawTestPeer, peer *rawTestPeer, conn net.Conn) *testPeer {
	return &testPeer{
		Peer: Start(
			self.config,
			conn,
			peer.cert,
			peer.config.MyNodeID,
			NewThrottledMessageQueue(
				self.config.Metrics,
				peer.config.MyNodeID,
				logging.NoLog{},
				throttling.NewNoOutboundThrottler(),
			),
		),
		inboundMsgChan: self.inboundMsgChan,
	}
}

// startTestPeers connects the two raw peers over an in-memory pipe and starts
// both sides.
func startTestPeers(rawPeer0 *rawTestPeer, rawPeer1 *rawTestPeer) (*testPeer, *testPeer) {
	conn0, conn1 := net.Pipe()
	peer0 := startTestPeer(rawPeer0, rawPeer1, conn0)
	peer1 := startTestPeer(rawPeer1, rawPeer0, conn1)
	return peer0, peer1
}

// awaitReady blocks until every given peer has completed the handshake and
// reports Ready.
func awaitReady(t *testing.T, peers ...Peer) {
	t.Helper()
	require := require.New(t)

	for _, peer := range peers {
		require.NoError(peer.AwaitReady(context.Background()))
		require.True(peer.Ready())
	}
}

func TestReady(t *testing.T) {
	require := require.New(t)

	config := newConfig(t)

	rawPeer0 := newRawTestPeer(t, config)
	rawPeer1 := newRawTestPeer(t, config)

	conn0, conn1 := net.Pipe()

	// With only one side started the handshake cannot complete.
	peer0 := startTestPeer(rawPeer0, rawPeer1, conn0)
	require.False(peer0.Ready())

	peer1 := startTestPeer(rawPeer1, rawPeer0, conn1)
	awaitReady(t, peer0, peer1)

	peer0.StartClose()
	require.NoError(peer0.AwaitClosed(context.Background()))
	require.NoError(peer1.AwaitClosed(context.Background()))
}

func TestSend(t *testing.T) {
	require := require.New(t)

	sharedConfig := newConfig(t)

	rawPeer0 := newRawTestPeer(t, sharedConfig)
	rawPeer1 := newRawTestPeer(t, sharedConfig)

	peer0, peer1 := startTestPeers(rawPeer0, rawPeer1)
	awaitReady(t, peer0, peer1)

	outboundGetMsg, err := sharedConfig.MessageCreator.Get(ids.Empty, 1, time.Second, ids.Empty)
	require.NoError(err)

	require.True(peer0.Send(context.Background(), outboundGetMsg))

	inboundGetMsg := <-peer1.inboundMsgChan
	require.Equal(message.GetOp, inboundGetMsg.Op())

	peer1.StartClose()
	require.NoError(peer0.AwaitClosed(context.Background()))
	require.NoError(peer1.AwaitClosed(context.Background()))
}

func TestPingUptimes(t *testing.T) {
	trackedSubnetID := ids.GenerateTestID()
	untrackedSubnetID := ids.GenerateTestID()

	sharedConfig := newConfig(t)
	sharedConfig.MySubnets = set.Of(trackedSubnetID)

	testCases := []struct {
		name        string
		msg         message.OutboundMessage
		shouldClose bool
		assertFn    func(*require.Assertions, *testPeer)
	}{
		{
			name: "primary network only",
			msg: func() message.OutboundMessage {
				pingMsg, err := sharedConfig.MessageCreator.Ping(1, nil)
				require.NoError(t, err)
				return pingMsg
			}(),
			shouldClose: false,
			assertFn: func(require *require.Assertions, peer *testPeer) {
				uptime, ok := peer.ObservedUptime(constants.PrimaryNetworkID)
				require.True(ok)
				require.Equal(uint32(1), uptime)

				uptime, ok = peer.ObservedUptime(trackedSubnetID)
				require.False(ok)
				require.Zero(uptime)
			},
		},
		{
			name: "primary network and subnet",
			msg: func() message.OutboundMessage {
				pingMsg, err := sharedConfig.MessageCreator.Ping(
					1,
					[]*p2p.SubnetUptime{
						{
							SubnetId: trackedSubnetID[:],
							Uptime:   1,
						},
					},
				)
				require.NoError(t, err)
				return pingMsg
			}(),
			shouldClose: false,
			assertFn: func(require *require.Assertions, peer *testPeer) {
				uptime, ok := peer.ObservedUptime(constants.PrimaryNetworkID)
				require.True(ok)
				require.Equal(uint32(1), uptime)

				uptime, ok = peer.ObservedUptime(trackedSubnetID)
				require.True(ok)
				require.Equal(uint32(1), uptime)
			},
		},
		{
			name: "primary network and non tracked subnet",
			msg: func() message.OutboundMessage {
				pingMsg, err := sharedConfig.MessageCreator.Ping(
					1,
					[]*p2p.SubnetUptime{
						{
							// Providing the untrackedSubnetID here should cause
							// the remote peer to disconnect from us.
							SubnetId: untrackedSubnetID[:],
							Uptime:   1,
						},
						{
							SubnetId: trackedSubnetID[:],
							Uptime:   1,
						},
					},
				)
				require.NoError(t, err)
				return pingMsg
			}(),
			shouldClose: true,
			assertFn:    nil,
		},
	}

	// The raw peers are generated outside of the test cases to avoid generating
	// many TLS keys.
	rawPeer0 := newRawTestPeer(t, sharedConfig)
	rawPeer1 := newRawTestPeer(t, sharedConfig)

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			require := require.New(t)

			peer0, peer1 := startTestPeers(rawPeer0, rawPeer1)
			awaitReady(t, peer0, peer1)
			defer func() {
				peer1.StartClose()
				peer0.StartClose()
				require.NoError(peer0.AwaitClosed(context.Background()))
				require.NoError(peer1.AwaitClosed(context.Background()))
			}()

			require.True(peer0.Send(context.Background(), tc.msg))

			if tc.shouldClose {
				require.NoError(peer1.AwaitClosed(context.Background()))
				return
			}

			// we send Get message after ping to ensure Ping is handled by the
			// time Get is handled. This is because Get is routed to the handler
			// whereas Ping is handled by the peer directly. We have no way to
			// know when the peer has handled the Ping message.
			sendAndFlush(t, peer0, peer1)

			tc.assertFn(require, peer1)
		})
	}
}

func TestTrackedSubnets(t *testing.T) {
	sharedConfig := newConfig(t)
	rawPeer0 := newRawTestPeer(t, sharedConfig)
	rawPeer1 := newRawTestPeer(t, sharedConfig)

	makeSubnetIDs := func(numSubnets int) []ids.ID {
		subnetIDs := make([]ids.ID, numSubnets)
		for i := range subnetIDs {
			subnetIDs[i] = ids.GenerateTestID()
		}
		return subnetIDs
	}

	tests := []struct {
		name             string
		trackedSubnets   []ids.ID
		shouldDisconnect bool
	}{
		{
			name:             "primary network only",
			trackedSubnets:   makeSubnetIDs(0),
			shouldDisconnect: false,
		},
		{
			name:             "single subnet",
			trackedSubnets:   makeSubnetIDs(1),
			shouldDisconnect: false,
		},
		{
			name:             "max subnets",
			trackedSubnets:   makeSubnetIDs(maxNumTrackedSubnets),
			shouldDisconnect: false,
		},
		{
			name:             "too many subnets",
			trackedSubnets:   makeSubnetIDs(maxNumTrackedSubnets + 1),
			shouldDisconnect: true,
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			require := require.New(t)

			// Only peer0 advertises the subnets under test; peer1 keeps the
			// shared (empty) subnet set.
			rawPeer0.config.MySubnets = set.Of(test.trackedSubnets...)
			peer0, peer1 := startTestPeers(rawPeer0, rawPeer1)
			if test.shouldDisconnect {
				require.NoError(peer0.AwaitClosed(context.Background()))
				require.NoError(peer1.AwaitClosed(context.Background()))
				return
			}

			defer func() {
				peer1.StartClose()
				peer0.StartClose()
				require.NoError(peer0.AwaitClosed(context.Background()))
				require.NoError(peer1.AwaitClosed(context.Background()))
			}()

			awaitReady(t, peer0, peer1)

			// peer0 observes peer1's (empty) tracked set; peer1 observes
			// peer0's advertised subnets plus the implicit primary network.
			require.Equal(set.Of(constants.PrimaryNetworkID), peer0.TrackedSubnets())

			expectedTrackedSubnets := set.Of(test.trackedSubnets...)
			expectedTrackedSubnets.Add(constants.PrimaryNetworkID)
			require.Equal(expectedTrackedSubnets, peer1.TrackedSubnets())
		})
	}
}

// Test that a peer using the wrong BLS key is disconnected from.
func TestInvalidBLSKeyDisconnects(t *testing.T) {
	require := require.New(t)

	sharedConfig := newConfig(t)

	rawPeer0 := newRawTestPeer(t, sharedConfig)
	rawPeer1 := newRawTestPeer(t, sharedConfig)

	require.NoError(rawPeer0.config.Validators.AddStaker(
		constants.PrimaryNetworkID,
		rawPeer1.config.MyNodeID,
		bls.PublicFromSecretKey(rawPeer1.config.IPSigner.blsSigner),
		ids.GenerateTestID(),
		1,
	))

	bogusBLSKey, err := bls.NewSecretKey()
	require.NoError(err)
	require.NoError(rawPeer1.config.Validators.AddStaker(
		constants.PrimaryNetworkID,
		rawPeer0.config.MyNodeID,
		bls.PublicFromSecretKey(bogusBLSKey), // This is the wrong BLS key for this peer
		ids.GenerateTestID(),
		1,
	))

	peer0, peer1 := startTestPeers(rawPeer0, rawPeer1)

	// Because peer1 thinks that peer0 is using the wrong BLS key, they should
	// disconnect from each other.
	require.NoError(peer0.AwaitClosed(context.Background()))
	require.NoError(peer1.AwaitClosed(context.Background()))
}

func TestShouldDisconnect(t *testing.T) {
	peerID := ids.GenerateTestNodeID()
	txID := ids.GenerateTestID()
	blsKey, err := bls.NewSecretKey()
	require.NoError(t, err)

	// Each case checks both the shouldDisconnect result and that the peer
	// struct is left in the expected state (e.g. txIDOfVerifiedBLSKey set
	// after a successful verification).
	tests := []struct {
		name                     string
		initialPeer              *peer
		expectedPeer             *peer
		expectedShouldDisconnect bool
	}{
		{
			name: "peer is reporting old version",
			initialPeer: &peer{
				Config: &Config{
					Log:                  logging.NoLog{},
					VersionCompatibility: version.GetCompatibility(upgrade.InitiallyActiveTime),
				},
				version: &version.Application{
					Name:  version.Client,
					Major: 0,
					Minor: 0,
					Patch: 0,
				},
			},
			expectedPeer: &peer{
				Config: &Config{
					Log:                  logging.NoLog{},
					VersionCompatibility: version.GetCompatibility(upgrade.InitiallyActiveTime),
				},
				version: &version.Application{
					Name:  version.Client,
					Major: 0,
					Minor: 0,
					Patch: 0,
				},
			},
			expectedShouldDisconnect: true,
		},
		{
			name: "peer is not a validator",
			initialPeer: &peer{
				Config: &Config{
					Log:                  logging.NoLog{},
					VersionCompatibility: version.GetCompatibility(upgrade.InitiallyActiveTime),
					Validators:           validators.NewManager(),
				},
				version: version.CurrentApp,
			},
			expectedPeer: &peer{
				Config: &Config{
					Log:                  logging.NoLog{},
					VersionCompatibility: version.GetCompatibility(upgrade.InitiallyActiveTime),
					Validators:           validators.NewManager(),
				},
				version: version.CurrentApp,
			},
			expectedShouldDisconnect: false,
		},
		{
			name: "peer is a validator without a BLS key",
			initialPeer: &peer{
				Config: &Config{
					Log:                  logging.NoLog{},
					VersionCompatibility: version.GetCompatibility(upgrade.InitiallyActiveTime),
					Validators: func() validators.Manager {
						vdrs := validators.NewManager()
						require.NoError(t, vdrs.AddStaker(
							constants.PrimaryNetworkID,
							peerID,
							nil,
							txID,
							1,
						))
						return vdrs
					}(),
				},
				id:      peerID,
				version: version.CurrentApp,
			},
			expectedPeer: &peer{
				Config: &Config{
					Log:                  logging.NoLog{},
					VersionCompatibility: version.GetCompatibility(upgrade.InitiallyActiveTime),
					Validators: func() validators.Manager {
						vdrs := validators.NewManager()
						require.NoError(t, vdrs.AddStaker(
							constants.PrimaryNetworkID,
							peerID,
							nil,
							txID,
							1,
						))
						return vdrs
					}(),
				},
				id:      peerID,
				version: version.CurrentApp,
			},
			expectedShouldDisconnect: false,
		},
		{
			name: "already verified peer",
			initialPeer: &peer{
				Config: &Config{
					Log:                  logging.NoLog{},
					VersionCompatibility: version.GetCompatibility(upgrade.InitiallyActiveTime),
					Validators: func() validators.Manager {
						vdrs := validators.NewManager()
						require.NoError(t, vdrs.AddStaker(
							constants.PrimaryNetworkID,
							peerID,
							bls.PublicFromSecretKey(blsKey),
							txID,
							1,
						))
						return vdrs
					}(),
				},
				id:                   peerID,
				version:              version.CurrentApp,
				txIDOfVerifiedBLSKey: txID,
			},
			expectedPeer: &peer{
				Config: &Config{
					Log:                  logging.NoLog{},
					VersionCompatibility: version.GetCompatibility(upgrade.InitiallyActiveTime),
					Validators: func() validators.Manager {
						vdrs := validators.NewManager()
						require.NoError(t, vdrs.AddStaker(
							constants.PrimaryNetworkID,
							peerID,
							bls.PublicFromSecretKey(blsKey),
							txID,
							1,
						))
						return vdrs
					}(),
				},
				id:                   peerID,
				version:              version.CurrentApp,
				txIDOfVerifiedBLSKey: txID,
			},
			expectedShouldDisconnect: false,
		},
		{
			name: "peer without signature",
			initialPeer: &peer{
				Config: &Config{
					Log:                  logging.NoLog{},
					VersionCompatibility: version.GetCompatibility(upgrade.InitiallyActiveTime),
					Validators: func() validators.Manager {
						vdrs := validators.NewManager()
						require.NoError(t, vdrs.AddStaker(
							constants.PrimaryNetworkID,
							peerID,
							bls.PublicFromSecretKey(blsKey),
							txID,
							1,
						))
						return vdrs
					}(),
				},
				id:      peerID,
				version: version.CurrentApp,
				ip:      &SignedIP{},
			},
			expectedPeer: &peer{
				Config: &Config{
					Log:                  logging.NoLog{},
					VersionCompatibility: version.GetCompatibility(upgrade.InitiallyActiveTime),
					Validators: func() validators.Manager {
						vdrs := validators.NewManager()
						require.NoError(t, vdrs.AddStaker(
							constants.PrimaryNetworkID,
							peerID,
							bls.PublicFromSecretKey(blsKey),
							txID,
							1,
						))
						return vdrs
					}(),
				},
				id:      peerID,
				version: version.CurrentApp,
				ip:      &SignedIP{},
			},
			expectedShouldDisconnect: true,
		},
		{
			name: "peer with invalid signature",
			initialPeer: &peer{
				Config: &Config{
					Log:                  logging.NoLog{},
					VersionCompatibility: version.GetCompatibility(upgrade.InitiallyActiveTime),
					Validators: func() validators.Manager {
						vdrs := validators.NewManager()
						require.NoError(t, vdrs.AddStaker(
							constants.PrimaryNetworkID,
							peerID,
							bls.PublicFromSecretKey(blsKey),
							txID,
							1,
						))
						return vdrs
					}(),
				},
				id:      peerID,
				version: version.CurrentApp,
				ip: &SignedIP{
					BLSSignature: bls.SignProofOfPossession(blsKey, []byte("wrong message")),
				},
			},
			expectedPeer: &peer{
				Config: &Config{
					Log:                  logging.NoLog{},
					VersionCompatibility: version.GetCompatibility(upgrade.InitiallyActiveTime),
					Validators: func() validators.Manager {
						vdrs := validators.NewManager()
						require.NoError(t, vdrs.AddStaker(
							constants.PrimaryNetworkID,
							peerID,
							bls.PublicFromSecretKey(blsKey),
							txID,
							1,
						))
						return vdrs
					}(),
				},
				id:      peerID,
				version: version.CurrentApp,
				ip: &SignedIP{
					BLSSignature: bls.SignProofOfPossession(blsKey, []byte("wrong message")),
				},
			},
			expectedShouldDisconnect: true,
		},
		{
			name: "peer with valid signature",
			initialPeer: &peer{
				Config: &Config{
					Log:                  logging.NoLog{},
					VersionCompatibility: version.GetCompatibility(upgrade.InitiallyActiveTime),
					Validators: func() validators.Manager {
						vdrs := validators.NewManager()
						require.NoError(t, vdrs.AddStaker(
							constants.PrimaryNetworkID,
							peerID,
							bls.PublicFromSecretKey(blsKey),
							txID,
							1,
						))
						return vdrs
					}(),
				},
				id:      peerID,
				version: version.CurrentApp,
				ip: &SignedIP{
					BLSSignature: bls.SignProofOfPossession(blsKey, (&UnsignedIP{}).bytes()),
				},
			},
			expectedPeer: &peer{
				Config: &Config{
					Log:                  logging.NoLog{},
					VersionCompatibility: version.GetCompatibility(upgrade.InitiallyActiveTime),
					Validators: func() validators.Manager {
						vdrs := validators.NewManager()
						require.NoError(t, vdrs.AddStaker(
							constants.PrimaryNetworkID,
							peerID,
							bls.PublicFromSecretKey(blsKey),
							txID,
							1,
						))
						return vdrs
					}(),
				},
				id:      peerID,
				version: version.CurrentApp,
				ip: &SignedIP{
					BLSSignature: bls.SignProofOfPossession(blsKey, (&UnsignedIP{}).bytes()),
				},
				txIDOfVerifiedBLSKey: txID,
			},
			expectedShouldDisconnect: false,
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			require := require.New(t)

			shouldDisconnect := test.initialPeer.shouldDisconnect()
			require.Equal(test.expectedPeer, test.initialPeer)
			require.Equal(test.expectedShouldDisconnect, shouldDisconnect)
		})
	}
}

// Helper to send a message from sender to receiver and assert that the
// receiver receives the message. This can be used to test a prior message
// was handled by the peer.
739 func sendAndFlush(t *testing.T, sender *testPeer, receiver *testPeer) { 740 t.Helper() 741 mc := newMessageCreator(t) 742 outboundGetMsg, err := mc.Get(ids.Empty, 1, time.Second, ids.Empty) 743 require.NoError(t, err) 744 require.True(t, sender.Send(context.Background(), outboundGetMsg)) 745 inboundGetMsg := <-receiver.inboundMsgChan 746 require.Equal(t, message.GetOp, inboundGetMsg.Op()) 747 }