github.com/MetalBlockchain/metalgo@v1.11.9/network/peer/peer_test.go

// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.

package peer

import (
    "context"
    "crypto"
    "net"
    "net/netip"
    "testing"
    "time"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/stretchr/testify/require"

    "github.com/MetalBlockchain/metalgo/ids"
    "github.com/MetalBlockchain/metalgo/message"
    "github.com/MetalBlockchain/metalgo/network/throttling"
    "github.com/MetalBlockchain/metalgo/proto/pb/p2p"
    "github.com/MetalBlockchain/metalgo/snow/networking/router"
    "github.com/MetalBlockchain/metalgo/snow/networking/tracker"
    "github.com/MetalBlockchain/metalgo/snow/uptime"
    "github.com/MetalBlockchain/metalgo/snow/validators"
    "github.com/MetalBlockchain/metalgo/staking"
    "github.com/MetalBlockchain/metalgo/utils"
    "github.com/MetalBlockchain/metalgo/utils/constants"
    "github.com/MetalBlockchain/metalgo/utils/crypto/bls"
    "github.com/MetalBlockchain/metalgo/utils/logging"
    "github.com/MetalBlockchain/metalgo/utils/math/meter"
    "github.com/MetalBlockchain/metalgo/utils/resource"
    "github.com/MetalBlockchain/metalgo/utils/set"
    "github.com/MetalBlockchain/metalgo/version"
)

type testPeer struct {
    Peer
    inboundMsgChan <-chan message.InboundMessage
}

type rawTestPeer struct {
    config         *Config
    cert           *staking.Certificate
    nodeID         ids.NodeID
    inboundMsgChan <-chan message.InboundMessage
}

func newMessageCreator(t *testing.T) message.Creator {
    t.Helper()

    mc, err := message.NewCreator(
        logging.NoLog{},
        prometheus.NewRegistry(),
        constants.DefaultNetworkCompressionType,
        10*time.Second,
    )
    require.NoError(t, err)

    return mc
}

func newConfig(t *testing.T) Config {
    t.Helper()
    require := require.New(t)

    metrics, err := NewMetrics(prometheus.NewRegistry())
    require.NoError(err)

    resourceTracker, err := tracker.NewResourceTracker(
        prometheus.NewRegistry(),
        resource.NoUsage,
        meter.ContinuousFactory{},
        10*time.Second,
    )
    require.NoError(err)

    return Config{
        ReadBufferSize:       constants.DefaultNetworkPeerReadBufferSize,
        WriteBufferSize:      constants.DefaultNetworkPeerWriteBufferSize,
        Metrics:              metrics,
        MessageCreator:       newMessageCreator(t),
        Log:                  logging.NoLog{},
        InboundMsgThrottler:  throttling.NewNoInboundThrottler(),
        Network:              TestNetwork,
        Router:               nil,
        VersionCompatibility: version.GetCompatibility(constants.LocalID),
        MySubnets:            nil,
        Beacons:              validators.NewManager(),
        Validators:           validators.NewManager(),
        NetworkID:            constants.LocalID,
        PingFrequency:        constants.DefaultPingFrequency,
        PongTimeout:          constants.DefaultPingPongTimeout,
        MaxClockDifference:   time.Minute,
        ResourceTracker:      resourceTracker,
        UptimeCalculator:     uptime.NoOpCalculator,
        IPSigner:             nil,
    }
}
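// newRawTestPeer returns a rawTestPeer backed by a freshly generated TLS
// certificate and BLS key, with the config's IPSigner set accordingly. The
// config's Router is replaced with an InboundHandlerFunc that forwards every
// inbound message to the returned peer's inboundMsgChan.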
func newRawTestPeer(t *testing.T, config Config) *rawTestPeer {
    t.Helper()
    require := require.New(t)

    tlsCert, err := staking.NewTLSCert()
    require.NoError(err)
    cert, err := staking.ParseCertificate(tlsCert.Leaf.Raw)
    require.NoError(err)
    nodeID := ids.NodeIDFromCert(cert)

    ip := utils.NewAtomic(netip.AddrPortFrom(
        netip.IPv6Loopback(),
        1,
    ))
    tls := tlsCert.PrivateKey.(crypto.Signer)
    bls, err := bls.NewSecretKey()
    require.NoError(err)

    config.IPSigner = NewIPSigner(ip, tls, bls)

    inboundMsgChan := make(chan message.InboundMessage)
    config.Router = router.InboundHandlerFunc(func(_ context.Context, msg message.InboundMessage) {
        inboundMsgChan <- msg
    })

    return &rawTestPeer{
        config:         &config,
        cert:           cert,
        nodeID:         nodeID,
        inboundMsgChan: inboundMsgChan,
    }
}

func startTestPeer(self *rawTestPeer, peer *rawTestPeer, conn net.Conn) *testPeer {
    return &testPeer{
        Peer: Start(
            self.config,
            conn,
            peer.cert,
            peer.nodeID,
            NewThrottledMessageQueue(
                self.config.Metrics,
                peer.nodeID,
                logging.NoLog{},
                throttling.NewNoOutboundThrottler(),
            ),
        ),
        inboundMsgChan: self.inboundMsgChan,
    }
}

func startTestPeers(rawPeer0 *rawTestPeer, rawPeer1 *rawTestPeer) (*testPeer, *testPeer) {
    conn0, conn1 := net.Pipe()
    peer0 := startTestPeer(rawPeer0, rawPeer1, conn0)
    peer1 := startTestPeer(rawPeer1, rawPeer0, conn1)
    return peer0, peer1
}

func awaitReady(t *testing.T, peers ...Peer) {
    t.Helper()
    require := require.New(t)

    for _, peer := range peers {
        require.NoError(peer.AwaitReady(context.Background()))
        require.True(peer.Ready())
    }
}

func TestReady(t *testing.T) {
    require := require.New(t)

    config := newConfig(t)

    rawPeer0 := newRawTestPeer(t, config)
    rawPeer1 := newRawTestPeer(t, config)

    conn0, conn1 := net.Pipe()

    peer0 := startTestPeer(rawPeer0, rawPeer1, conn0)
    require.False(peer0.Ready())

    peer1 := startTestPeer(rawPeer1, rawPeer0, conn1)
    awaitReady(t, peer0, peer1)

    peer0.StartClose()
    require.NoError(peer0.AwaitClosed(context.Background()))
    require.NoError(peer1.AwaitClosed(context.Background()))
}

func TestSend(t *testing.T) {
    require := require.New(t)

    sharedConfig := newConfig(t)

    rawPeer0 := newRawTestPeer(t, sharedConfig)
    rawPeer1 := newRawTestPeer(t, sharedConfig)

    peer0, peer1 := startTestPeers(rawPeer0, rawPeer1)
    awaitReady(t, peer0, peer1)

    outboundGetMsg, err := sharedConfig.MessageCreator.Get(ids.Empty, 1, time.Second, ids.Empty)
    require.NoError(err)

    require.True(peer0.Send(context.Background(), outboundGetMsg))

    inboundGetMsg := <-peer1.inboundMsgChan
    require.Equal(message.GetOp, inboundGetMsg.Op())

    peer1.StartClose()
    require.NoError(peer0.AwaitClosed(context.Background()))
    require.NoError(peer1.AwaitClosed(context.Background()))
}
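// TestPingUptimes verifies how a peer records the uptimes carried on Ping
// messages: uptimes reported for the primary network and for tracked subnets
// become visible through ObservedUptime, while a report for an untracked
// subnet causes the receiving peer to disconnect.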
func TestPingUptimes(t *testing.T) {
    trackedSubnetID := ids.GenerateTestID()
    untrackedSubnetID := ids.GenerateTestID()

    sharedConfig := newConfig(t)
    sharedConfig.MySubnets = set.Of(trackedSubnetID)

    testCases := []struct {
        name        string
        msg         message.OutboundMessage
        shouldClose bool
        assertFn    func(*require.Assertions, *testPeer)
    }{
        {
            name: "primary network only",
            msg: func() message.OutboundMessage {
                pingMsg, err := sharedConfig.MessageCreator.Ping(1, nil)
                require.NoError(t, err)
                return pingMsg
            }(),
            shouldClose: false,
            assertFn: func(require *require.Assertions, peer *testPeer) {
                uptime, ok := peer.ObservedUptime(constants.PrimaryNetworkID)
                require.True(ok)
                require.Equal(uint32(1), uptime)

                uptime, ok = peer.ObservedUptime(trackedSubnetID)
                require.False(ok)
                require.Zero(uptime)
            },
        },
        {
            name: "primary network and subnet",
            msg: func() message.OutboundMessage {
                pingMsg, err := sharedConfig.MessageCreator.Ping(
                    1,
                    []*p2p.SubnetUptime{
                        {
                            SubnetId: trackedSubnetID[:],
                            Uptime:   1,
                        },
                    },
                )
                require.NoError(t, err)
                return pingMsg
            }(),
            shouldClose: false,
            assertFn: func(require *require.Assertions, peer *testPeer) {
                uptime, ok := peer.ObservedUptime(constants.PrimaryNetworkID)
                require.True(ok)
                require.Equal(uint32(1), uptime)

                uptime, ok = peer.ObservedUptime(trackedSubnetID)
                require.True(ok)
                require.Equal(uint32(1), uptime)
            },
        },
        {
            name: "primary network and non tracked subnet",
            msg: func() message.OutboundMessage {
                pingMsg, err := sharedConfig.MessageCreator.Ping(
                    1,
                    []*p2p.SubnetUptime{
                        {
                            // Providing the untrackedSubnetID here should cause
                            // the remote peer to disconnect from us.
                            SubnetId: untrackedSubnetID[:],
                            Uptime:   1,
                        },
                        {
                            SubnetId: trackedSubnetID[:],
                            Uptime:   1,
                        },
                    },
                )
                require.NoError(t, err)
                return pingMsg
            }(),
            shouldClose: true,
            assertFn:    nil,
        },
    }

    // The raw peers are generated outside of the test cases to avoid
    // generating many TLS keys.
    rawPeer0 := newRawTestPeer(t, sharedConfig)
    rawPeer1 := newRawTestPeer(t, sharedConfig)

    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            require := require.New(t)

            peer0, peer1 := startTestPeers(rawPeer0, rawPeer1)
            awaitReady(t, peer0, peer1)
            defer func() {
                peer1.StartClose()
                peer0.StartClose()
                require.NoError(peer0.AwaitClosed(context.Background()))
                require.NoError(peer1.AwaitClosed(context.Background()))
            }()

            require.True(peer0.Send(context.Background(), tc.msg))

            if tc.shouldClose {
                require.NoError(peer1.AwaitClosed(context.Background()))
                return
            }

            // We send a Get message after the Ping to ensure the Ping has been
            // handled by the time the Get is handled. This is because Get is
            // routed to the handler, whereas Ping is handled by the peer
            // directly; we otherwise have no way to know when the peer has
            // handled the Ping message.
            sendAndFlush(t, peer0, peer1)

            tc.assertFn(require, peer1)
        })
    }
}
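// TestTrackedSubnets verifies that TrackedSubnets reports the subnets the
// remote peer advertised during the handshake (always including the primary
// network), and that a peer tracking more than maxNumTrackedSubnets is
// disconnected from.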
func TestTrackedSubnets(t *testing.T) {
    sharedConfig := newConfig(t)
    rawPeer0 := newRawTestPeer(t, sharedConfig)
    rawPeer1 := newRawTestPeer(t, sharedConfig)

    makeSubnetIDs := func(numSubnets int) []ids.ID {
        subnetIDs := make([]ids.ID, numSubnets)
        for i := range subnetIDs {
            subnetIDs[i] = ids.GenerateTestID()
        }
        return subnetIDs
    }

    tests := []struct {
        name             string
        trackedSubnets   []ids.ID
        shouldDisconnect bool
    }{
        {
            name:             "primary network only",
            trackedSubnets:   makeSubnetIDs(0),
            shouldDisconnect: false,
        },
        {
            name:             "single subnet",
            trackedSubnets:   makeSubnetIDs(1),
            shouldDisconnect: false,
        },
        {
            name:             "max subnets",
            trackedSubnets:   makeSubnetIDs(maxNumTrackedSubnets),
            shouldDisconnect: false,
        },
        {
            name:             "too many subnets",
            trackedSubnets:   makeSubnetIDs(maxNumTrackedSubnets + 1),
            shouldDisconnect: true,
        },
    }

    for _, test := range tests {
        t.Run(test.name, func(t *testing.T) {
            require := require.New(t)

            rawPeer0.config.MySubnets = set.Of(test.trackedSubnets...)
            peer0, peer1 := startTestPeers(rawPeer0, rawPeer1)
            if test.shouldDisconnect {
                require.NoError(peer0.AwaitClosed(context.Background()))
                require.NoError(peer1.AwaitClosed(context.Background()))
                return
            }

            defer func() {
                peer1.StartClose()
                peer0.StartClose()
                require.NoError(peer0.AwaitClosed(context.Background()))
                require.NoError(peer1.AwaitClosed(context.Background()))
            }()

            awaitReady(t, peer0, peer1)

            require.Equal(set.Of(constants.PrimaryNetworkID), peer0.TrackedSubnets())

            expectedTrackedSubnets := set.Of(test.trackedSubnets...)
            expectedTrackedSubnets.Add(constants.PrimaryNetworkID)
            require.Equal(expectedTrackedSubnets, peer1.TrackedSubnets())
        })
    }
}

// Test that peers disconnect from a peer that advertises the wrong BLS key.
func TestInvalidBLSKeyDisconnects(t *testing.T) {
    require := require.New(t)

    sharedConfig := newConfig(t)

    rawPeer0 := newRawTestPeer(t, sharedConfig)
    rawPeer1 := newRawTestPeer(t, sharedConfig)

    require.NoError(rawPeer0.config.Validators.AddStaker(
        constants.PrimaryNetworkID,
        rawPeer1.nodeID,
        bls.PublicFromSecretKey(rawPeer1.config.IPSigner.blsSigner),
        ids.GenerateTestID(),
        1,
    ))

    bogusBLSKey, err := bls.NewSecretKey()
    require.NoError(err)
    require.NoError(rawPeer1.config.Validators.AddStaker(
        constants.PrimaryNetworkID,
        rawPeer0.nodeID,
        bls.PublicFromSecretKey(bogusBLSKey), // This is the wrong BLS key for this peer
        ids.GenerateTestID(),
        1,
    ))

    peer0, peer1 := startTestPeers(rawPeer0, rawPeer1)

    // Because peer1 thinks that peer0 is using the wrong BLS key, they should
    // disconnect from each other.
    require.NoError(peer0.AwaitClosed(context.Background()))
    require.NoError(peer1.AwaitClosed(context.Background()))
}
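// TestShouldDisconnect exercises shouldDisconnect against a table of peer
// states. Each case also asserts that the call leaves the peer unchanged,
// except that successfully verifying a validator's BLS signature records the
// validation txID in txIDOfVerifiedBLSKey.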
func TestShouldDisconnect(t *testing.T) {
    peerID := ids.GenerateTestNodeID()
    txID := ids.GenerateTestID()
    blsKey, err := bls.NewSecretKey()
    require.NoError(t, err)

    tests := []struct {
        name                     string
        initialPeer              *peer
        expectedPeer             *peer
        expectedShouldDisconnect bool
    }{
        {
            name: "peer is reporting old version",
            initialPeer: &peer{
                Config: &Config{
                    Log:                  logging.NoLog{},
                    VersionCompatibility: version.GetCompatibility(constants.UnitTestID),
                },
                version: &version.Application{
                    Name:  version.Client,
                    Major: 0,
                    Minor: 0,
                    Patch: 0,
                },
            },
            expectedPeer: &peer{
                Config: &Config{
                    Log:                  logging.NoLog{},
                    VersionCompatibility: version.GetCompatibility(constants.UnitTestID),
                },
                version: &version.Application{
                    Name:  version.Client,
                    Major: 0,
                    Minor: 0,
                    Patch: 0,
                },
            },
            expectedShouldDisconnect: true,
        },
        {
            name: "peer is not a validator",
            initialPeer: &peer{
                Config: &Config{
                    Log:                  logging.NoLog{},
                    VersionCompatibility: version.GetCompatibility(constants.UnitTestID),
                    Validators:           validators.NewManager(),
                },
                version: version.CurrentApp,
            },
            expectedPeer: &peer{
                Config: &Config{
                    Log:                  logging.NoLog{},
                    VersionCompatibility: version.GetCompatibility(constants.UnitTestID),
                    Validators:           validators.NewManager(),
                },
                version: version.CurrentApp,
            },
            expectedShouldDisconnect: false,
        },
        {
            name: "peer is a validator without a BLS key",
            initialPeer: &peer{
                Config: &Config{
                    Log:                  logging.NoLog{},
                    VersionCompatibility: version.GetCompatibility(constants.UnitTestID),
                    Validators: func() validators.Manager {
                        vdrs := validators.NewManager()
                        require.NoError(t, vdrs.AddStaker(
                            constants.PrimaryNetworkID,
                            peerID,
                            nil,
                            txID,
                            1,
                        ))
                        return vdrs
                    }(),
                },
                id:      peerID,
                version: version.CurrentApp,
            },
            expectedPeer: &peer{
                Config: &Config{
                    Log:                  logging.NoLog{},
                    VersionCompatibility: version.GetCompatibility(constants.UnitTestID),
                    Validators: func() validators.Manager {
                        vdrs := validators.NewManager()
                        require.NoError(t, vdrs.AddStaker(
                            constants.PrimaryNetworkID,
                            peerID,
                            nil,
                            txID,
                            1,
                        ))
                        return vdrs
                    }(),
                },
                id:      peerID,
                version: version.CurrentApp,
            },
            expectedShouldDisconnect: false,
        },
        {
            name: "already verified peer",
            initialPeer: &peer{
                Config: &Config{
                    Log:                  logging.NoLog{},
                    VersionCompatibility: version.GetCompatibility(constants.UnitTestID),
                    Validators: func() validators.Manager {
                        vdrs := validators.NewManager()
                        require.NoError(t, vdrs.AddStaker(
                            constants.PrimaryNetworkID,
                            peerID,
                            bls.PublicFromSecretKey(blsKey),
                            txID,
                            1,
                        ))
                        return vdrs
                    }(),
                },
                id:                   peerID,
                version:              version.CurrentApp,
                txIDOfVerifiedBLSKey: txID,
            },
            expectedPeer: &peer{
                Config: &Config{
                    Log:                  logging.NoLog{},
                    VersionCompatibility: version.GetCompatibility(constants.UnitTestID),
                    Validators: func() validators.Manager {
                        vdrs := validators.NewManager()
                        require.NoError(t, vdrs.AddStaker(
                            constants.PrimaryNetworkID,
                            peerID,
                            bls.PublicFromSecretKey(blsKey),
                            txID,
                            1,
                        ))
                        return vdrs
                    }(),
                },
                id:                   peerID,
                version:              version.CurrentApp,
                txIDOfVerifiedBLSKey: txID,
            },
            expectedShouldDisconnect: false,
        },
        {
            name: "peer without signature",
            initialPeer: &peer{
                Config: &Config{
                    Log:                  logging.NoLog{},
                    VersionCompatibility: version.GetCompatibility(constants.UnitTestID),
                    Validators: func() validators.Manager {
                        vdrs := validators.NewManager()
                        require.NoError(t, vdrs.AddStaker(
                            constants.PrimaryNetworkID,
                            peerID,
                            bls.PublicFromSecretKey(blsKey),
                            txID,
                            1,
                        ))
                        return vdrs
                    }(),
                },
                id:      peerID,
                version: version.CurrentApp,
                ip:      &SignedIP{},
            },
            expectedPeer: &peer{
                Config: &Config{
                    Log:                  logging.NoLog{},
                    VersionCompatibility: version.GetCompatibility(constants.UnitTestID),
                    Validators: func() validators.Manager {
                        vdrs := validators.NewManager()
                        require.NoError(t, vdrs.AddStaker(
                            constants.PrimaryNetworkID,
                            peerID,
                            bls.PublicFromSecretKey(blsKey),
                            txID,
                            1,
                        ))
                        return vdrs
                    }(),
                },
                id:      peerID,
                version: version.CurrentApp,
                ip:      &SignedIP{},
            },
            expectedShouldDisconnect: true,
        },
        {
            name: "peer with invalid signature",
            initialPeer: &peer{
                Config: &Config{
                    Log:                  logging.NoLog{},
                    VersionCompatibility: version.GetCompatibility(constants.UnitTestID),
                    Validators: func() validators.Manager {
                        vdrs := validators.NewManager()
                        require.NoError(t, vdrs.AddStaker(
                            constants.PrimaryNetworkID,
                            peerID,
                            bls.PublicFromSecretKey(blsKey),
                            txID,
                            1,
                        ))
                        return vdrs
                    }(),
                },
                id:      peerID,
                version: version.CurrentApp,
                ip: &SignedIP{
                    BLSSignature: bls.SignProofOfPossession(blsKey, []byte("wrong message")),
                },
            },
            expectedPeer: &peer{
                Config: &Config{
                    Log:                  logging.NoLog{},
                    VersionCompatibility: version.GetCompatibility(constants.UnitTestID),
                    Validators: func() validators.Manager {
                        vdrs := validators.NewManager()
                        require.NoError(t, vdrs.AddStaker(
                            constants.PrimaryNetworkID,
                            peerID,
                            bls.PublicFromSecretKey(blsKey),
                            txID,
                            1,
                        ))
                        return vdrs
                    }(),
                },
                id:      peerID,
                version: version.CurrentApp,
                ip: &SignedIP{
                    BLSSignature: bls.SignProofOfPossession(blsKey, []byte("wrong message")),
                },
            },
            expectedShouldDisconnect: true,
        },
        {
            name: "peer with valid signature",
            initialPeer: &peer{
                Config: &Config{
                    Log:                  logging.NoLog{},
                    VersionCompatibility: version.GetCompatibility(constants.UnitTestID),
                    Validators: func() validators.Manager {
                        vdrs := validators.NewManager()
                        require.NoError(t, vdrs.AddStaker(
                            constants.PrimaryNetworkID,
                            peerID,
                            bls.PublicFromSecretKey(blsKey),
                            txID,
                            1,
                        ))
                        return vdrs
                    }(),
                },
                id:      peerID,
                version: version.CurrentApp,
                ip: &SignedIP{
                    BLSSignature: bls.SignProofOfPossession(blsKey, (&UnsignedIP{}).bytes()),
                },
            },
            expectedPeer: &peer{
                Config: &Config{
                    Log:                  logging.NoLog{},
                    VersionCompatibility: version.GetCompatibility(constants.UnitTestID),
                    Validators: func() validators.Manager {
                        vdrs := validators.NewManager()
                        require.NoError(t, vdrs.AddStaker(
                            constants.PrimaryNetworkID,
                            peerID,
                            bls.PublicFromSecretKey(blsKey),
                            txID,
                            1,
                        ))
                        return vdrs
                    }(),
                },
                id:      peerID,
                version: version.CurrentApp,
                ip: &SignedIP{
                    BLSSignature: bls.SignProofOfPossession(blsKey, (&UnsignedIP{}).bytes()),
                },
                txIDOfVerifiedBLSKey: txID,
            },
            expectedShouldDisconnect: false,
        },
    }

    for _, test := range tests {
        t.Run(test.name, func(t *testing.T) {
            require := require.New(t)

            shouldDisconnect := test.initialPeer.shouldDisconnect()
            require.Equal(test.expectedPeer, test.initialPeer)
            require.Equal(test.expectedShouldDisconnect, shouldDisconnect)
        })
    }
}

// sendAndFlush sends a Get message from sender to receiver and asserts that
// the receiver receives it. This can be used to verify that a prior message
// was handled by the peer.
func sendAndFlush(t *testing.T, sender *testPeer, receiver *testPeer) {
    t.Helper()
    mc := newMessageCreator(t)
    outboundGetMsg, err := mc.Get(ids.Empty, 1, time.Second, ids.Empty)
    require.NoError(t, err)
    require.True(t, sender.Send(context.Background(), outboundGetMsg))
    inboundGetMsg := <-receiver.inboundMsgChan
    require.Equal(t, message.GetOp, inboundGetMsg.Op())
}
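// Taken together, the helpers above form the harness that every test in this
// file follows. A minimal sketch of that flow (illustrative only, mirroring
// TestSend rather than adding a new test):
//
//	sharedConfig := newConfig(t)
//	rawPeer0 := newRawTestPeer(t, sharedConfig)
//	rawPeer1 := newRawTestPeer(t, sharedConfig)
//
//	peer0, peer1 := startTestPeers(rawPeer0, rawPeer1) // connected via net.Pipe
//	awaitReady(t, peer0, peer1)                        // handshake completed
//
//	msg, _ := sharedConfig.MessageCreator.Get(ids.Empty, 1, time.Second, ids.Empty)
//	peer0.Send(context.Background(), msg)
//	<-peer1.inboundMsgChan // delivered via the Router installed in newRawTestPeer
//
//	peer0.StartClose()
//	_ = peer0.AwaitClosed(context.Background())
//	_ = peer1.AwaitClosed(context.Background())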