github.com/ethersphere/bee/v2@v2.2.0/pkg/p2p/libp2p/connections_test.go

// Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package libp2p_test

import (
	"bytes"
	"context"
	"errors"
	"io"
	"math/rand"
	"reflect"
	"strings"
	"sync"
	"testing"
	"time"

	"github.com/ethersphere/bee/v2/pkg/addressbook"
	"github.com/ethersphere/bee/v2/pkg/log"
	"github.com/ethersphere/bee/v2/pkg/p2p"
	"github.com/ethersphere/bee/v2/pkg/p2p/libp2p"
	"github.com/ethersphere/bee/v2/pkg/p2p/libp2p/internal/handshake"
	"github.com/ethersphere/bee/v2/pkg/spinlock"
	"github.com/ethersphere/bee/v2/pkg/statestore/mock"
	"github.com/ethersphere/bee/v2/pkg/swarm"
	"github.com/ethersphere/bee/v2/pkg/topology/lightnode"
	"github.com/libp2p/go-libp2p/p2p/host/eventbus"

	libp2pm "github.com/libp2p/go-libp2p"
	"github.com/libp2p/go-libp2p/core/event"
	"github.com/libp2p/go-libp2p/core/host"
	"github.com/libp2p/go-libp2p/core/network"
	libp2ppeer "github.com/libp2p/go-libp2p/core/peer"
	bhost "github.com/libp2p/go-libp2p/p2p/host/basic"
	swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing"
	ma "github.com/multiformats/go-multiaddr"
)

const (
	testDisconnectMsg = "test disconnect"
	testBlocklistMsg  = "test blocklist"
)

func TestAddresses(t *testing.T) {
	t.Parallel()

	s, _ := newService(t, 1, libp2pServiceOpts{})

	addrs, err := s.Addresses()
	if err != nil {
		t.Fatal(err)
	}
	if l := len(addrs); l == 0 {
		t.Fatal("no addresses")
	}
}

func TestConnectDisconnect(t *testing.T) {
	t.Parallel()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	s1, overlay1 := newService(t, 1, libp2pServiceOpts{libp2pOpts: libp2p.Options{
		FullNode: true,
	}})
	s2, overlay2 := newService(t, 1, libp2pServiceOpts{})

	addr := serviceUnderlayAddress(t, s1)

	bzzAddr, err := s2.Connect(ctx, addr)
	if err != nil {
		t.Fatal(err)
	}

	expectPeers(t, s2, overlay1)
	expectPeersEventually(t, s1, overlay2)

	if err := s2.Disconnect(bzzAddr.Overlay, testDisconnectMsg); err != nil {
		t.Fatal(err)
	}

	expectPeers(t, s2)
	expectPeersEventually(t, s1)
}

func TestConnectToLightPeer(t *testing.T) {
	t.Parallel()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	s1, _ := newService(t, 1, libp2pServiceOpts{libp2pOpts: libp2p.Options{
		FullNode: false,
	}})
	s2, _ := newService(t, 1, libp2pServiceOpts{})

	addr := serviceUnderlayAddress(t, s1)

	_, err := s2.Connect(ctx, addr)
	if !errors.Is(err, p2p.ErrDialLightNode) {
		t.Fatalf("expected err %v, got %v", p2p.ErrDialLightNode, err)
	}

	expectPeers(t, s2)
	expectPeersEventually(t, s1)
}
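
// TestLightPeerLimit ensures that a full node accepts light peers only up to
// the configured LightNodeLimit, even when more light nodes keep connecting.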
func TestLightPeerLimit(t *testing.T) {
	t.Parallel()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	var (
		limit     = 3
		container = lightnode.NewContainer(swarm.RandAddress(t))
		notifier  = mockNotifier(noopCf, noopDf, true)
		sf, _     = newService(t, 1, libp2pServiceOpts{
			lightNodes: container,
			libp2pOpts: libp2p.Options{
				LightNodeLimit: limit,
				FullNode:       true,
			},
			notifier: notifier,
		})
	)

	addr := serviceUnderlayAddress(t, sf)

	for i := 0; i < 5; i++ {
		sl, _ := newService(t, 1, libp2pServiceOpts{
			notifier: notifier,
			libp2pOpts: libp2p.Options{
				FullNode: false,
			},
		})
		_, err := sl.Connect(ctx, addr)
		if err != nil {
			t.Fatal(err)
		}
	}

	err := spinlock.Wait(time.Second, func() bool {
		return container.Count() == limit
	})
	if err != nil {
		t.Fatal("timed out waiting for correct number of lightnodes")
	}
}

// TestStreamsMaxIncomingLimit validates that a session between peers can
// sustain up to the maximal configured concurrent streams, that all further
// streams will result in an ErrReset error, and that when the number of
// concurrent streams is below the limit, new streams are created without
// errors.
func TestStreamsMaxIncomingLimit(t *testing.T) {
	t.Parallel()

	maxIncomingStreams := 5000

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	s1, overlay1 := newService(t, 1, libp2pServiceOpts{libp2pOpts: libp2p.Options{
		FullNode: true,
	}})
	s2, overlay2 := newService(t, 1, libp2pServiceOpts{})

	var (
		streamsMu sync.Mutex
		streams   = make([]p2p.Stream, 0)
	)

	testProtocolSpec := p2p.ProtocolSpec{
		Name:    testProtocolName,
		Version: testProtocolVersion,
		StreamSpecs: []p2p.StreamSpec{
			{
				Name: testStreamName,
				Handler: func(ctx context.Context, p p2p.Peer, s p2p.Stream) error {
					streamsMu.Lock()
					defer streamsMu.Unlock()

					streams = append(streams, s)
					return nil
				},
			},
		},
	}

	t.Cleanup(func() {
		for _, s := range streams {
			if err := s.Reset(); err != nil {
				t.Error(err)
			}
		}
	})

	testProtocolClient := func() error {
		_, err := s2.NewStream(ctx, overlay1, nil, testProtocolName, testProtocolVersion, testStreamName)
		if err != nil {
			return err
		}
		// do not close or reset the stream in defer in order to keep the stream active
		return nil
	}

	if err := s1.AddProtocol(testProtocolSpec); err != nil {
		t.Fatal(err)
	}

	if _, err := s2.Connect(ctx, serviceUnderlayAddress(t, s1)); err != nil {
		t.Fatal(err)
	}

	expectPeers(t, s2, overlay1)
	expectPeersEventually(t, s1, overlay2)

	overflowStreamCount := maxIncomingStreams / 4

	// create streams over the limit

	for i := 0; i < maxIncomingStreams+overflowStreamCount; i++ {
		err := testProtocolClient()
		if i < maxIncomingStreams {
			if err != nil {
				t.Errorf("test protocol client %v: %v", i, err)
			}
		} else {
			if err == nil {
				t.Errorf("test protocol client %v got nil error", i)
			}
		}
	}

	if len(streams) != maxIncomingStreams {
		t.Errorf("got %v streams, want %v", len(streams), maxIncomingStreams)
	}

	closeStreamCount := len(streams) / 2

	// close random streams to validate new streams creation

	random := rand.New(rand.NewSource(time.Now().UnixNano()))
	for i := 0; i < closeStreamCount; i++ {
		n := random.Intn(len(streams))
		if err := streams[n].Reset(); err != nil {
			t.Error(err)
			continue
		}
		streams = append(streams[:n], streams[n+1:]...)
	}

	if maxIncomingStreams-len(streams) != closeStreamCount {
		t.Errorf("got %v closed streams, want %v", maxIncomingStreams-len(streams), closeStreamCount)
	}

	// create new streams

	for i := 0; i < closeStreamCount+overflowStreamCount; i++ {
		err := testProtocolClient()
		if i < closeStreamCount {
			if err != nil {
				t.Errorf("test protocol client %v: %v", i, err)
			}
		} else {
			if err == nil {
				t.Errorf("test protocol client %v got nil error", i)
			}
		}
	}

	if len(streams) != maxIncomingStreams {
		t.Errorf("got %v streams, want %v", len(streams), maxIncomingStreams)
	}

	expectPeers(t, s2, overlay1)
	expectPeersEventually(t, s1, overlay2)
}
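
// TestDoubleConnect verifies that a second Connect call to an already
// connected peer fails with p2p.ErrAlreadyConnected while the existing
// connection stays intact on both sides.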
func TestDoubleConnect(t *testing.T) {
	t.Parallel()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	s1, overlay1 := newService(t, 1, libp2pServiceOpts{libp2pOpts: libp2p.Options{
		FullNode: true,
	}})
	s2, overlay2 := newService(t, 1, libp2pServiceOpts{})

	addr := serviceUnderlayAddress(t, s1)

	if _, err := s2.Connect(ctx, addr); err != nil {
		t.Fatal(err)
	}

	expectPeers(t, s2, overlay1)
	expectPeersEventually(t, s1, overlay2)

	if _, err := s2.Connect(ctx, addr); !errors.Is(err, p2p.ErrAlreadyConnected) {
		t.Fatalf("expected %s error, got %s error", p2p.ErrAlreadyConnected, err)
	}

	expectPeers(t, s2, overlay1)
	expectPeers(t, s1, overlay2)
}

func TestDoubleDisconnect(t *testing.T) {
	t.Parallel()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	s1, overlay1 := newService(t, 1, libp2pServiceOpts{libp2pOpts: libp2p.Options{
		FullNode: true,
	}})
	s2, overlay2 := newService(t, 1, libp2pServiceOpts{})

	addr := serviceUnderlayAddress(t, s1)

	bzzAddr, err := s2.Connect(ctx, addr)
	if err != nil {
		t.Fatal(err)
	}

	expectPeers(t, s2, overlay1)
	expectPeersEventually(t, s1, overlay2)

	if err := s2.Disconnect(bzzAddr.Overlay, testDisconnectMsg); err != nil {
		t.Fatal(err)
	}

	expectPeers(t, s2)
	expectPeersEventually(t, s1)

	if err := s2.Disconnect(bzzAddr.Overlay, testDisconnectMsg); !errors.Is(err, p2p.ErrPeerNotFound) {
		t.Errorf("got error %v, want %v", err, p2p.ErrPeerNotFound)
	}

	expectPeers(t, s2)
	expectPeersEventually(t, s1)
}

func TestMultipleConnectDisconnect(t *testing.T) {
	t.Parallel()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	s1, overlay1 := newService(t, 1, libp2pServiceOpts{libp2pOpts: libp2p.Options{
		FullNode: true,
	}})

	s2, overlay2 := newService(t, 1, libp2pServiceOpts{})

	addr := serviceUnderlayAddress(t, s1)

	bzzAddr, err := s2.Connect(ctx, addr)
	if err != nil {
		t.Fatal(err)
	}

	expectPeers(t, s2, overlay1)
	expectPeersEventually(t, s1, overlay2)

	if err := s2.Disconnect(bzzAddr.Overlay, testDisconnectMsg); err != nil {
		t.Fatal(err)
	}

	expectPeers(t, s2)
	expectPeersEventually(t, s1)

	bzzAddr, err = s2.Connect(ctx, addr)
	if err != nil {
		t.Fatal(err)
	}

	expectPeers(t, s2, overlay1)
	expectPeersEventually(t, s1, overlay2)

	if err := s2.Disconnect(bzzAddr.Overlay, testDisconnectMsg); err != nil {
		t.Fatal(err)
	}

	expectPeers(t, s2)
	expectPeersEventually(t, s1)
}
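
// TestConnectDisconnectOnAllAddresses runs the connect/disconnect cycle
// against every underlay address the node advertises.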
func TestConnectDisconnectOnAllAddresses(t *testing.T) {
	t.Parallel()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	s1, overlay1 := newService(t, 1, libp2pServiceOpts{libp2pOpts: libp2p.Options{
		FullNode: true,
	}})

	s2, overlay2 := newService(t, 1, libp2pServiceOpts{})

	addrs, err := s1.Addresses()
	if err != nil {
		t.Fatal(err)
	}
	for _, addr := range addrs {
		bzzAddr, err := s2.Connect(ctx, addr)
		if err != nil {
			t.Fatal(err)
		}

		expectPeers(t, s2, overlay1)
		expectPeersEventually(t, s1, overlay2)

		if err := s2.Disconnect(bzzAddr.Overlay, testDisconnectMsg); err != nil {
			t.Fatal(err)
		}

		expectPeers(t, s2)
		expectPeersEventually(t, s1)
	}
}

func TestDoubleConnectOnAllAddresses(t *testing.T) {
	t.Parallel()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	s1, overlay1 := newService(t, 1, libp2pServiceOpts{
		notifier: mockNotifier(noopCf, noopDf, true),
		libp2pOpts: libp2p.Options{
			FullNode: true,
		}})
	addrs, err := s1.Addresses()
	if err != nil {
		t.Fatal(err)
	}
	for _, addr := range addrs {
		// creating a new remote host for each address
		s2, overlay2 := newService(t, 1, libp2pServiceOpts{notifier: mockNotifier(noopCf, noopDf, true)})

		if _, err := s2.Connect(ctx, addr); err != nil {
			t.Fatal(err)
		}

		expectPeers(t, s2, overlay1)
		expectPeersEventually(t, s1, overlay2)

		if _, err := s2.Connect(ctx, addr); !errors.Is(err, p2p.ErrAlreadyConnected) {
			t.Fatalf("expected %s error, got %s error", p2p.ErrAlreadyConnected, err)
		}

		expectPeers(t, s2, overlay1)
		expectPeers(t, s1, overlay2)

		if err := s2.Disconnect(overlay1, testDisconnectMsg); err != nil {
			t.Fatal(err)
		}

		expectPeers(t, s2)
		expectPeersEventually(t, s1)

		s2.Close()
	}
}

func TestDifferentNetworkIDs(t *testing.T) {
	t.Parallel()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	s1, _ := newService(t, 1, libp2pServiceOpts{})
	s2, _ := newService(t, 2, libp2pServiceOpts{})

	addr := serviceUnderlayAddress(t, s1)

	if _, err := s2.Connect(ctx, addr); err == nil {
		t.Fatal("connect attempt should result in an error")
	}

	expectPeers(t, s1)
	expectPeers(t, s2)
}
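
// TestConnectWithEnabledWSTransports checks that two nodes with the
// WebSocket transport enabled can still connect and see each other as peers.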
func TestConnectWithEnabledWSTransports(t *testing.T) {
	t.Parallel()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	s1, overlay1 := newService(t, 1, libp2pServiceOpts{
		libp2pOpts: libp2p.Options{
			EnableWS: true,
			FullNode: true,
		},
	})

	s2, overlay2 := newService(t, 1, libp2pServiceOpts{
		libp2pOpts: libp2p.Options{
			EnableWS: true,
			FullNode: true,
		},
	})

	addr := serviceUnderlayAddress(t, s1)

	if _, err := s2.Connect(ctx, addr); err != nil {
		t.Fatal(err)
	}

	expectPeers(t, s2, overlay1)
	expectPeersEventually(t, s1, overlay2)
}

// TestConnectRepeatHandshake tests the case where the handshake is attempted
// more than once by the same peer
func TestConnectRepeatHandshake(t *testing.T) {
	t.Parallel()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	s1, overlay1 := newService(t, 1, libp2pServiceOpts{libp2pOpts: libp2p.Options{
		FullNode: true,
	}})
	s2, overlay2 := newService(t, 1, libp2pServiceOpts{})

	addr := serviceUnderlayAddress(t, s1)

	_, err := s2.Connect(ctx, addr)
	if err != nil {
		t.Fatal(err)
	}

	expectPeers(t, s2, overlay1)
	expectPeersEventually(t, s1, overlay2)

	info, err := libp2ppeer.AddrInfoFromP2pAddr(addr)
	if err != nil {
		t.Fatal(err)
	}

	stream, err := s2.NewStreamForPeerID(info.ID, handshake.ProtocolName, handshake.ProtocolVersion, handshake.StreamName)
	if err != nil {
		t.Fatal(err)
	}

	if _, err := s2.HandshakeService().Handshake(ctx, s2.WrapStream(stream), info.Addrs[0], info.ID); err != nil {
		t.Fatal(err)
	}

	expectPeersEventually(t, s2, overlay1)
	expectPeersEventually(t, s1, overlay2)
}

func TestBlocklisting(t *testing.T) {
	t.Parallel()

	s1, overlay1 := newService(t, 1, libp2pServiceOpts{libp2pOpts: libp2p.Options{
		FullNode: true,
	}})
	s2, overlay2 := newService(t, 1, libp2pServiceOpts{})

	addr1 := serviceUnderlayAddress(t, s1)
	addr2 := serviceUnderlayAddress(t, s2)

	_, err := s2.Connect(context.Background(), addr1)
	if err != nil {
		t.Fatal(err)
	}

	expectPeers(t, s2, overlay1)
	expectPeersEventually(t, s1, overlay2)

	if err := s2.Blocklist(overlay1, 0, testBlocklistMsg); err != nil {
		t.Fatal(err)
	}

	expectPeers(t, s2)
	expectPeersEventually(t, s1)

	_, err = s2.Connect(context.Background(), addr1)
	if err == nil {
		t.Fatal("expected error during connection, got nil")
	}

	expectPeers(t, s2)
	expectPeersEventually(t, s1)

	_, err = s1.Connect(context.Background(), addr2)
	if err == nil {
		t.Fatal("expected error during connection, got nil")
	}

	expectPeersEventually(t, s1)
	expectPeers(t, s2)
}
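
// TestBlocklistedPeers verifies that a blocklisted peer shows up in the
// BlocklistedPeers listing with the reason and duration it was blocklisted with.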
func TestBlocklistedPeers(t *testing.T) {
	t.Parallel()

	s1, overlay1 := newService(t, 1, libp2pServiceOpts{libp2pOpts: libp2p.Options{
		FullNode: true,
	}})
	s2, _ := newService(t, 1, libp2pServiceOpts{})
	addr1 := serviceUnderlayAddress(t, s1)
	_, err := s2.Connect(context.Background(), addr1)
	if err != nil {
		t.Fatal(err)
	}

	if err = s2.Blocklist(overlay1, 0, testBlocklistMsg); err != nil {
		t.Fatal(err)
	}
	blocklistedPeers, err := s2.BlocklistedPeers()
	if err != nil {
		t.Fatal("could not get blocklisted peers", err)
	}

	want := []p2p.BlockListedPeer{
		{
			Peer: p2p.Peer{
				Address: overlay1,
				// we can't mock peerRegistry, so we can't know if the peer is a full node or not.
				// TODO: consider injecting peerRegistry in libp2pService
				FullNode: true,
			},
			Reason:   testBlocklistMsg,
			Duration: 0,
		},
	}

	if !reflect.DeepEqual(want, blocklistedPeers) {
		t.Fatal("want", want, "got", blocklistedPeers)
	}
}
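
// TestTopologyNotifier checks that the registered picky notifiers receive
// connected and disconnected callbacks on both peers as connections are made
// and torn down.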
func TestTopologyNotifier(t *testing.T) {
	t.Parallel()

	var (
		mtx sync.Mutex
		ctx = context.Background()

		ab1, ab2 = addressbook.New(mock.NewStateStore()), addressbook.New(mock.NewStateStore())

		n1connectedPeer    p2p.Peer
		n1disconnectedPeer p2p.Peer
		n2connectedPeer    p2p.Peer
		n2disconnectedPeer p2p.Peer

		n1c = func(_ context.Context, p p2p.Peer, _ bool) error {
			mtx.Lock()
			defer mtx.Unlock()
			expectZeroAddress(t, n1connectedPeer.Address) // fail if set more than once
			expectFullNode(t, p)
			n1connectedPeer = p
			return nil
		}
		n1d = func(p p2p.Peer) {
			mtx.Lock()
			defer mtx.Unlock()
			n1disconnectedPeer = p
		}

		n2c = func(_ context.Context, p p2p.Peer, _ bool) error {
			mtx.Lock()
			defer mtx.Unlock()
			expectZeroAddress(t, n2connectedPeer.Address) // fail if set more than once
			n2connectedPeer = p
			expectFullNode(t, p)
			return nil
		}
		n2d = func(p p2p.Peer) {
			mtx.Lock()
			defer mtx.Unlock()
			n2disconnectedPeer = p
		}
	)
	notifier1 := mockNotifier(n1c, n1d, true)
	s1, overlay1 := newService(t, 1, libp2pServiceOpts{
		Addressbook: ab1,
		libp2pOpts: libp2p.Options{
			FullNode: true,
		},
	})
	s1.SetPickyNotifier(notifier1)

	notifier2 := mockNotifier(n2c, n2d, true)
	s2, overlay2 := newService(t, 1, libp2pServiceOpts{
		Addressbook: ab2,
		libp2pOpts: libp2p.Options{
			FullNode: true,
		},
	})
	s2.SetPickyNotifier(notifier2)

	addr := serviceUnderlayAddress(t, s1)

	// s2 connects to s1, thus the notifier on s1 should be called on Connect
	bzzAddr, err := s2.Connect(ctx, addr)
	if err != nil {
		t.Fatal(err)
	}

	expectPeers(t, s2, overlay1)
	expectPeersEventually(t, s1, overlay2)

	// expect that the n1 notifiee is called with the s2 overlay
	waitAddrSet(t, &n1connectedPeer.Address, &mtx, overlay2)

	mtx.Lock()
	expectZeroAddress(t, n1disconnectedPeer.Address, n2connectedPeer.Address, n2disconnectedPeer.Address)
	mtx.Unlock()

	// check address book entries are there
	checkAddressbook(t, ab2, overlay1, addr)

	// s2 disconnects from s1 so the s1 disconnect notifiee should be called
	if err := s2.Disconnect(bzzAddr.Overlay, testDisconnectMsg); err != nil {
		t.Fatal(err)
	}

	expectPeers(t, s2)
	expectPeersEventually(t, s1)
	waitAddrSet(t, &n1disconnectedPeer.Address, &mtx, overlay2)

	// note that both n1disconnect and n2disconnect callbacks are called after just
	// one disconnect. this is because when the libp2p abstraction is explicitly
	// asked to disconnect from a peer, it also notifies the topology notifiee,
	// since peer disconnections can also originate from components outside the
	// bounds of the topology driver
	mtx.Lock()
	expectZeroAddress(t, n2connectedPeer.Address)
	mtx.Unlock()

	addr2 := serviceUnderlayAddress(t, s2)
	// s1 connects to s2, thus the notifiee on s2 should be called on Connect
	bzzAddr2, err := s1.Connect(ctx, addr2)
	if err != nil {
		t.Fatal(err)
	}

	expectPeers(t, s1, overlay2)
	expectPeersEventually(t, s2, overlay1)
	waitAddrSet(t, &n2connectedPeer.Address, &mtx, overlay1)

	// s1 disconnects from s2 so the s2 disconnect notifiee should be called
	if err := s1.Disconnect(bzzAddr2.Overlay, testDisconnectMsg); err != nil {
		t.Fatal(err)
	}
	expectPeers(t, s1)
	expectPeersEventually(t, s2)
	waitAddrSet(t, &n2disconnectedPeer.Address, &mtx, overlay1)
}

// TestTopologyAnnounce checks that announcement
// works correctly for full nodes and light nodes.
func TestTopologyAnnounce(t *testing.T) {
	t.Parallel()

	var (
		mtx sync.Mutex
		ctx = context.Background()

		ab1, ab2, ab3 = addressbook.New(mock.NewStateStore()), addressbook.New(mock.NewStateStore()), addressbook.New(mock.NewStateStore())

		announceCalled   = false
		announceToCalled = false

		n1a = func(context.Context, swarm.Address, bool) error {
			mtx.Lock()
			announceCalled = true
			mtx.Unlock()
			return nil
		}
		n1at = func(context.Context, swarm.Address, swarm.Address, bool) error {
			mtx.Lock()
			announceToCalled = true
			mtx.Unlock()
			return nil
		}
	)
	// test setup: 2 full nodes and one light node.
	// the light node connects to full(1), then full(2)
	// connects to full(1). check that full(1)
	// tried to announce full(2) to the light node.

	notifier1 := mockAnnouncingNotifier(n1a, n1at)
	s1, overlay1 := newService(t, 1, libp2pServiceOpts{
		Addressbook: ab1,
		libp2pOpts: libp2p.Options{
			FullNode: true,
		},
	})
	s1.SetPickyNotifier(notifier1)

	s2, overlay2 := newService(t, 1, libp2pServiceOpts{
		Addressbook: ab2,
		libp2pOpts: libp2p.Options{
			FullNode: true,
		},
	})

	s3, overlay3 := newService(t, 1, libp2pServiceOpts{
		Addressbook: ab3,
		libp2pOpts: libp2p.Options{
			FullNode: false,
		},
	})

	addr := serviceUnderlayAddress(t, s1)

	// s3 (light) connects to s1 (full)
	_, err := s3.Connect(ctx, addr)
	if err != nil {
		t.Fatal(err)
	}

	expectPeers(t, s3, overlay1)
	expectPeersEventually(t, s1, overlay3)
	called := false

	for i := 0; i < 20; i++ {
		mtx.Lock()
		called = announceCalled
		mtx.Unlock()
		if called {
			break
		}
		time.Sleep(50 * time.Millisecond)
	}
	if !called {
		t.Error("expected announce to be called")
	}
	for i := 0; i < 10; i++ {
		mtx.Lock()
		called = announceToCalled
		mtx.Unlock()
		if called {
			break
		}
		time.Sleep(50 * time.Millisecond)
	}

	if called {
		t.Error("announceTo was called but should not have been")
	}

	// check address book entries are there
	checkAddressbook(t, ab3, overlay1, addr)

	// s2 (full) connects to s1 (full)
	_, err = s2.Connect(ctx, addr)
	if err != nil {
		t.Fatal(err)
	}

	expectPeers(t, s2, overlay1)
	expectPeersEventually(t, s1, overlay2, overlay3)

	for i := 0; i < 20; i++ {
		mtx.Lock()
		called = announceToCalled
		mtx.Unlock()
		if called {
			break
		}
		time.Sleep(50 * time.Millisecond)
	}

	if !called {
		t.Error("expected announceTo to be called")
	}
}
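
// TestTopologyOverSaturated verifies that when the notifier refuses to pick
// the dialing peer, the connection attempt fails and the dialing side is
// notified of the disconnect.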
func TestTopologyOverSaturated(t *testing.T) {
	t.Parallel()

	var (
		mtx sync.Mutex
		ctx = context.Background()

		ab1, ab2 = addressbook.New(mock.NewStateStore()), addressbook.New(mock.NewStateStore())

		n1connectedPeer    p2p.Peer
		n2connectedPeer    p2p.Peer
		n2disconnectedPeer p2p.Peer

		n1c = func(_ context.Context, p p2p.Peer, _ bool) error {
			mtx.Lock()
			defer mtx.Unlock()
			expectZeroAddress(t, n1connectedPeer.Address) // fail if set more than once
			n1connectedPeer = p
			return nil
		}
		n1d = func(p p2p.Peer) {}

		n2c = func(_ context.Context, p p2p.Peer, _ bool) error {
			mtx.Lock()
			defer mtx.Unlock()
			expectZeroAddress(t, n2connectedPeer.Address) // fail if set more than once
			n2connectedPeer = p
			return nil
		}
		n2d = func(p p2p.Peer) {
			mtx.Lock()
			defer mtx.Unlock()
			n2disconnectedPeer = p
		}
	)
	// this notifier will not pick the peer
	notifier1 := mockNotifier(n1c, n1d, false)
	s1, overlay1 := newService(t, 1, libp2pServiceOpts{Addressbook: ab1, libp2pOpts: libp2p.Options{
		FullNode: true,
	}})
	s1.SetPickyNotifier(notifier1)

	notifier2 := mockNotifier(n2c, n2d, false)
	s2, _ := newService(t, 1, libp2pServiceOpts{Addressbook: ab2})
	s2.SetPickyNotifier(notifier2)

	addr := serviceUnderlayAddress(t, s1)

	// s2 connects to s1, thus the notifier on s1 should be called on Connect
	_, err := s2.Connect(ctx, addr)
	if err == nil {
		t.Fatal("expected connect to fail but it didn't")
	}

	expectPeers(t, s1)
	expectPeersEventually(t, s2)

	waitAddrSet(t, &n2disconnectedPeer.Address, &mtx, overlay1)
}

func TestWithDisconnectStreams(t *testing.T) {
	t.Parallel()

	const headersRWTimeout = 60 * time.Second

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	s1, overlay1 := newService(t, 1, libp2pServiceOpts{libp2pOpts: libp2p.Options{
		FullNode:         true,
		HeadersRWTimeout: headersRWTimeout,
	}})
	s2, overlay2 := newService(t, 1, libp2pServiceOpts{libp2pOpts: libp2p.Options{
		HeadersRWTimeout: headersRWTimeout,
	}})

	testSpec := p2p.ProtocolSpec{
		Name:    testProtocolName,
		Version: testProtocolVersion,
		StreamSpecs: []p2p.StreamSpec{
			{
				Name: testStreamName,
				Handler: func(c context.Context, p p2p.Peer, s p2p.Stream) error {
					return nil
				},
			},
		},
	}

	p2p.WithDisconnectStreams(testSpec)

	_ = s1.AddProtocol(testSpec)

	s1Underlay := serviceUnderlayAddress(t, s1)

	expectPeers(t, s1)
	expectPeers(t, s2)

	if _, err := s2.Connect(ctx, s1Underlay); err != nil {
		t.Fatal(err)
	}

	expectPeers(t, s1, overlay2)
	expectPeers(t, s2, overlay1)

	s, err := s2.NewStream(ctx, overlay1, nil, testProtocolName, testProtocolVersion, testStreamName)

	expectStreamReset(t, s, err)

	expectPeersEventually(t, s2)
	expectPeersEventually(t, s1)
}

func TestWithBlocklistStreams(t *testing.T) {
	t.Parallel()
	t.Skip("this test always fails")

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	s1, overlay1 := newService(t, 1, libp2pServiceOpts{libp2pOpts: libp2p.Options{
		FullNode: true,
	}})
	s2, overlay2 := newService(t, 1, libp2pServiceOpts{})

	testSpec := p2p.ProtocolSpec{
		Name:    testProtocolName,
		Version: testProtocolVersion,
		StreamSpecs: []p2p.StreamSpec{
			{
				Name: testStreamName,
				Handler: func(c context.Context, p p2p.Peer, s p2p.Stream) error {
					return nil
				},
			},
		},
	}

	p2p.WithBlocklistStreams(1*time.Minute, testSpec)

	_ = s1.AddProtocol(testSpec)

	s1Underlay := serviceUnderlayAddress(t, s1)

	if _, err := s2.Connect(ctx, s1Underlay); err != nil {
		t.Fatal(err)
	}

	expectPeers(t, s2, overlay1)
	expectPeersEventually(t, s1, overlay2)

	s, err := s2.NewStream(ctx, overlay1, nil, testProtocolName, testProtocolVersion, testStreamName)

	expectStreamReset(t, s, err)

	expectPeersEventually(t, s2)
	expectPeersEventually(t, s1)

	if _, err := s2.Connect(ctx, s1Underlay); err == nil {
		t.Fatal("expected error when connecting to blocklisted peer")
	}

	expectPeersEventually(t, s2)
	expectPeersEventually(t, s1)
}
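
// TestUserAgentLogging checks that both sides log the remote peer's user
// agent on a successful connection: s1 on the inbound side and s2 on the
// outbound side.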
func TestUserAgentLogging(t *testing.T) {
	t.Parallel()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// use concurrent-safe buffers as handlers are logging concurrently
	s1Logs := new(buffer)
	s2Logs := new(buffer)

	s1, _ := newService(t, 1, libp2pServiceOpts{
		libp2pOpts: libp2p.Options{
			FullNode: true,
		},
		Logger: log.NewLogger("s1", log.WithSink(s1Logs), log.WithVerbosity(log.VerbosityDebug)),
	})
	s2, _ := newService(t, 1, libp2pServiceOpts{
		Logger: log.NewLogger("s2", log.WithSink(s2Logs), log.WithVerbosity(log.VerbosityDebug)),
	})

	addr := serviceUnderlayAddress(t, s1)

	_, err := s2.Connect(ctx, addr)
	if err != nil {
		t.Fatal(err)
	}

	// wait for logs to be written to buffers
	err = spinlock.Wait(time.Second*10, func() bool {
		return s1Logs.Len() > 0 && s2Logs.Len() > 0
	})
	if err != nil {
		t.Fatal("timed out waiting for logs to be written")
	}

	testUserAgentLogLine(t, s1Logs, "(inbound)")
	testUserAgentLogLine(t, s2Logs, "(outbound)")
}

func TestReachabilityUpdate(t *testing.T) {
	t.Parallel()

	s1, _ := newService(t, 1, libp2pServiceOpts{
		libp2pOpts: libp2p.WithHostFactory(
			func(_ ...libp2pm.Option) (host.Host, error) {
				return bhost.NewHost(swarmt.GenSwarm(t), &bhost.HostOpts{})
			},
		),
	})

	emitReachabilityChanged, _ := s1.Host().EventBus().Emitter(new(event.EvtLocalReachabilityChanged), eventbus.Stateful)

	firstUpdate := make(chan struct{})
	secondUpdate := make(chan struct{})

	s1.SetPickyNotifier(mockReachabilityNotifier(func(status p2p.ReachabilityStatus) {
		if status == p2p.ReachabilityStatusPublic {
			close(firstUpdate)
		}
		if status == p2p.ReachabilityStatusPrivate {
			close(secondUpdate)
		}
	}))

	err := emitReachabilityChanged.Emit(event.EvtLocalReachabilityChanged{Reachability: network.ReachabilityPublic})
	if err != nil {
		t.Fatal(err)
	}

	select {
	case <-firstUpdate:
	case <-time.After(time.Second):
		t.Fatalf("test timed out")
	}

	err = emitReachabilityChanged.Emit(event.EvtLocalReachabilityChanged{Reachability: network.ReachabilityPrivate})
	if err != nil {
		t.Fatal(err)
	}

	select {
	case <-secondUpdate:
	case <-time.After(time.Second):
		t.Fatalf("test timed out")
	}
}
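
// testUserAgentLogLine scans the captured log output for the successful
// connection line carrying the given direction marker and asserts that it
// contains the expected user agent.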
func testUserAgentLogLine(t *testing.T, logs *buffer, substring string) {
	t.Helper()

	wantUserAgent := libp2p.UserAgent()
	if wantUserAgent == "" {
		t.Fatal("libp2p.UserAgent(): got empty user agent")
	}

	logLineMarker := "successfully connected to peer"
	var foundLogLine bool
	var lines []string
	for {
		line, err := logs.ReadString('\n')
		if err != nil {
			if errors.Is(err, io.EOF) {
				break
			}
			t.Fatal(err)
		}
		lines = append(lines, line)
		if strings.Contains(line, logLineMarker) && strings.Contains(line, substring) {
			foundLogLine = true
			if !strings.Contains(line, wantUserAgent) {
				t.Errorf("log line %q does not contain the expected user agent %q", line, wantUserAgent)
			}
		}
	}
	if !foundLogLine {
		t.Errorf("log line with %q and %q strings was not found in %v", logLineMarker, substring, lines)
	}
}

// buffer is a bytes.Buffer with some methods exposed that are safe to be used
// concurrently.
type buffer struct {
	b bytes.Buffer
	m sync.Mutex
}

func (b *buffer) ReadString(delim byte) (string, error) {
	b.m.Lock()
	defer b.m.Unlock()
	return b.b.ReadString(delim)
}

func (b *buffer) Write(p []byte) (int, error) {
	b.m.Lock()
	defer b.m.Unlock()
	return b.b.Write(p)
}

func (b *buffer) Len() int {
	b.m.Lock()
	defer b.m.Unlock()
	return b.b.Len()
}

func expectStreamReset(t *testing.T, s io.ReadCloser, err error) {
	t.Helper()

	// because the disconnect method is asynchronous, the stream reset error
	// should occur either on creation or on the first read attempt
	if err != nil && !errors.Is(err, network.ErrReset) {
		t.Fatalf("expected stream reset error, got %v", err)
	}

	if err == nil {
		readErr := make(chan error)
		go func() {
			_, err := s.Read(make([]byte, 10))
			readErr <- err
		}()

		select {
		// because a read could block without erroring, we should also expect a timeout
		case <-time.After(60 * time.Second):
			t.Error("expected stream reset error, got timeout reading")
		case err := <-readErr:
			if !errors.Is(err, network.ErrReset) {
				t.Errorf("expected stream reset error, got %v", err)
			}
		}
	}
}

func expectFullNode(t *testing.T, p p2p.Peer) {
	t.Helper()
	if !p.FullNode {
		t.Fatal("expected peer to be a full node")
	}
}

func expectZeroAddress(t *testing.T, addrs ...swarm.Address) {
	t.Helper()
	for i, a := range addrs {
		if !a.Equal(swarm.ZeroAddress) {
			t.Fatalf("address did not equal zero address. index %d", i)
		}
	}
}

func waitAddrSet(t *testing.T, addr *swarm.Address, mtx *sync.Mutex, exp swarm.Address) {
	t.Helper()

	err := spinlock.Wait(time.Second, func() bool {
		mtx.Lock()
		defer mtx.Unlock()
		return addr.Equal(exp)
	})
	if err != nil {
		t.Fatal("timed out waiting for address to be set")
	}
}

func checkAddressbook(t *testing.T, ab addressbook.Getter, overlay swarm.Address, underlay ma.Multiaddr) {
	t.Helper()
	addr, err := ab.Get(overlay)
	if err != nil {
		t.Fatal(err)
	}
	if !addr.Overlay.Equal(overlay) {
		t.Fatalf("overlay mismatch. got %s want %s", addr.Overlay, overlay)
	}

	if !addr.Underlay.Equal(underlay) {
		t.Fatalf("underlay mismatch. got %s, want %s", addr.Underlay, underlay)
	}
}
type notifiee struct {
	connected          cFunc
	disconnected       dFunc
	pick               bool
	announce           announceFunc
	announceTo         announceToFunc
	updateReachability reachabilityFunc
	reachable          reachableFunc
}

func (n *notifiee) Connected(c context.Context, p p2p.Peer, f bool) error {
	return n.connected(c, p, f)
}

func (n *notifiee) Disconnected(p p2p.Peer) {
	n.disconnected(p)
}

func (n *notifiee) Pick(p2p.Peer) bool {
	return n.pick
}

func (n *notifiee) Announce(ctx context.Context, a swarm.Address, full bool) error {
	return n.announce(ctx, a, full)
}

func (n *notifiee) AnnounceTo(ctx context.Context, a, b swarm.Address, full bool) error {
	return n.announceTo(ctx, a, b, full)
}

func (n *notifiee) UpdateReachability(status p2p.ReachabilityStatus) {
	n.updateReachability(status)
}

func (n *notifiee) Reachable(addr swarm.Address, status p2p.ReachabilityStatus) {
	n.reachable(addr, status)
}

func mockNotifier(c cFunc, d dFunc, pick bool) p2p.PickyNotifier {
	return &notifiee{
		connected:          c,
		disconnected:       d,
		pick:               pick,
		announce:           noopAnnounce,
		announceTo:         noopAnnounceTo,
		updateReachability: noopReachability,
		reachable:          noopReachable,
	}
}

func mockAnnouncingNotifier(a announceFunc, at announceToFunc) p2p.PickyNotifier {
	return &notifiee{
		connected:          noopCf,
		disconnected:       noopDf,
		pick:               true,
		announce:           a,
		announceTo:         at,
		updateReachability: noopReachability,
		reachable:          noopReachable,
	}
}

func mockReachabilityNotifier(r reachabilityFunc) p2p.PickyNotifier {
	return &notifiee{
		connected:          noopCf,
		disconnected:       noopDf,
		pick:               true,
		announce:           noopAnnounce,
		announceTo:         noopAnnounceTo,
		updateReachability: r,
		reachable:          noopReachable,
	}
}

type (
	cFunc            func(context.Context, p2p.Peer, bool) error
	dFunc            func(p2p.Peer)
	announceFunc     func(context.Context, swarm.Address, bool) error
	announceToFunc   func(context.Context, swarm.Address, swarm.Address, bool) error
	reachabilityFunc func(p2p.ReachabilityStatus)
	reachableFunc    func(swarm.Address, p2p.ReachabilityStatus)
)

var noopCf = func(context.Context, p2p.Peer, bool) error { return nil }
var noopDf = func(p2p.Peer) {}
var noopAnnounce = func(context.Context, swarm.Address, bool) error { return nil }
var noopAnnounceTo = func(context.Context, swarm.Address, swarm.Address, bool) error { return nil }
var noopReachability = func(p2p.ReachabilityStatus) {}
var noopReachable = func(swarm.Address, p2p.ReachabilityStatus) {}