github.com/ethersphere/bee/v2@v2.2.0/pkg/salud/salud_test.go

// Copyright 2023 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package salud_test

import (
	"context"
	"errors"
	"testing"
	"time"

	"github.com/ethersphere/bee/v2/pkg/log"
	"github.com/ethersphere/bee/v2/pkg/salud"
	"github.com/ethersphere/bee/v2/pkg/spinlock"
	"github.com/ethersphere/bee/v2/pkg/status"
	mockstorer "github.com/ethersphere/bee/v2/pkg/storer/mock"
	"github.com/ethersphere/bee/v2/pkg/swarm"
	topMock "github.com/ethersphere/bee/v2/pkg/topology/mock"
)

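// peer bundles a mock peer's overlay address, the status snapshot the
// statusMock returns for it, a simulated response delay in 100ms units
// (applied in PeerSnapshot below), and the health verdict the test expects.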
type peer struct {
	addr    swarm.Address
	status  *status.Snapshot
	waitDur int
	health  bool
}

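// TestSalud starts the salud service over a set of mocked peers and asserts
// that each peer's health verdict matches expectations: peers fall out of
// health when their storage radius is too low, their snapshot takes too long
// to arrive, they report too few connections, or their batch commitment
// deviates from the majority.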
func TestSalud(t *testing.T) {
	t.Parallel()
	peers := []peer{
		// fully healthy
		{swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100}, 1, true},
		{swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100}, 1, true},
		{swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100}, 1, true},
		{swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100}, 1, true},
		{swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100}, 1, true},
		{swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100}, 1, true},

		// healthy since radius >= most common radius - 1
		{swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 7, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100}, 1, true},

		// radius too low
		{swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 6, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100}, 1, false},

		// response duration too long
		{swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100}, 2, false},
		{swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100}, 2, false},

		// not enough connections
		{swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 90, StorageRadius: 8, BeeMode: "full", BatchCommitment: 50, ReserveSize: 100}, 1, false},

		// batch commitment does not match the majority
		{swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full", BatchCommitment: 35, ReserveSize: 100}, 1, false},
	}

	statusM := &statusMock{make(map[string]peer)}

	addrs := make([]swarm.Address, 0, len(peers))
	for _, p := range peers {
		addrs = append(addrs, p.addr)
		statusM.peers[p.addr.ByteString()] = p
	}

	topM := topMock.NewTopologyDriver(topMock.WithPeers(addrs...))

	reserve := mockstorer.NewReserve(
		mockstorer.WithRadius(8),
		mockstorer.WithReserveSize(100),
	)

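	// The arguments after the logger map to salud.New's warmup, mode,
	// minPeersPerBin, and duration/connections percentile parameters: a
	// negative warmup so the first round runs immediately, bee mode "full",
	// zero minimum peers per bin, and 0.8 for both percentiles.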
	service := salud.New(statusM, topM, reserve, log.Noop, -1, "full", 0, 0.8, 0.8)

	// wait until salud has assessed every peer before checking the verdicts
	err := spinlock.Wait(time.Minute, func() bool {
		return len(topM.PeersHealth()) == len(peers)
	})
	if err != nil {
		t.Fatal(err)
	}

	for _, p := range peers {
		if want, got := p.health, topM.PeersHealth()[p.addr.ByteString()]; want != got {
			t.Fatalf("got health %v, want %v for peer %s, %v", got, want, p.addr, p.status)
		}
	}

	if !service.IsHealthy() {
		t.Fatalf("self should be healthy")
	}

	if err := service.Close(); err != nil {
		t.Fatal(err)
	}
}

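// TestSelfUnhealthyRadius asserts that the node considers itself unhealthy
// when its own reserve radius (7) trails the network storage radius (8)
// reported by its peers.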
func TestSelfUnhealthyRadius(t *testing.T) {
	t.Parallel()
	peers := []peer{
		// fully healthy
		{swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full"}, 0, true},
		{swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full"}, 0, true},
	}

	statusM := &statusMock{make(map[string]peer)}
	addrs := make([]swarm.Address, 0, len(peers))
	for _, p := range peers {
		addrs = append(addrs, p.addr)
		statusM.peers[p.addr.ByteString()] = p
	}

	topM := topMock.NewTopologyDriver(topMock.WithPeers(addrs...))

	reserve := mockstorer.NewReserve(
		mockstorer.WithRadius(7),
		mockstorer.WithReserveSize(100),
	)

	service := salud.New(statusM, topM, reserve, log.Noop, -1, "full", 0, 0.8, 0.8)

	err := spinlock.Wait(time.Minute, func() bool {
		return len(topM.PeersHealth()) == len(peers)
	})
	if err != nil {
		t.Fatal(err)
	}

	if service.IsHealthy() {
		t.Fatalf("self should NOT be healthy")
	}

	if err := service.Close(); err != nil {
		t.Fatal(err)
	}
}

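// TestSubToRadius asserts that a subscriber receives the network storage
// radius once the service has derived it from peer snapshots.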
func TestSubToRadius(t *testing.T) {
	t.Parallel()
	peers := []peer{
		// fully healthy
		{swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full", ReserveSize: 100}, 0, true},
		{swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full", ReserveSize: 100}, 0, true},
	}

	addrs := make([]swarm.Address, 0, len(peers))
	for _, p := range peers {
		addrs = append(addrs, p.addr)
	}

	topM := topMock.NewTopologyDriver(topMock.WithPeers(addrs...))

	service := salud.New(&statusMock{make(map[string]peer)}, topM, mockstorer.NewReserve(), log.Noop, -1, "full", 0, 0.8, 0.8)

	c, unsub := service.SubscribeNetworkStorageRadius()
	t.Cleanup(unsub)

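	// both peers report storage radius 8, so that is the value the
	// subscription is expected to deliver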
	select {
	case radius := <-c:
		if radius != 8 {
			t.Fatalf("wanted radius 8, got %d", radius)
		}
	case <-time.After(time.Second):
		t.Fatal("timed out waiting for network storage radius")
	}

	if err := service.Close(); err != nil {
		t.Fatal(err)
	}
}

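// TestUnsub asserts that a subscription channel stays silent after its
// unsubscribe function has been called.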
func TestUnsub(t *testing.T) {
	t.Parallel()
	peers := []peer{
		// fully healthy
		{swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full", ReserveSize: 100}, 0, true},
		{swarm.RandAddress(t), &status.Snapshot{ConnectedPeers: 100, StorageRadius: 8, BeeMode: "full", ReserveSize: 100}, 0, true},
	}

	addrs := make([]swarm.Address, 0, len(peers))
	for _, p := range peers {
		addrs = append(addrs, p.addr)
	}

	topM := topMock.NewTopologyDriver(topMock.WithPeers(addrs...))

	service := salud.New(&statusMock{make(map[string]peer)}, topM, mockstorer.NewReserve(), log.Noop, -1, "full", 0, 0.8, 0.8)

	c, unsub := service.SubscribeNetworkStorageRadius()
	unsub()

	select {
	case <-c:
		t.Fatal("should not have received a radius")
	case <-time.After(time.Second):
	}

	if err := service.Close(); err != nil {
		t.Fatal(err)
	}
}

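// statusMock hands out canned status snapshots keyed by peer address,
// sleeping waitDur*100ms first to simulate peer response latency.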
type statusMock struct {
	peers map[string]peer
}

func (p *statusMock) PeerSnapshot(ctx context.Context, peer swarm.Address) (*status.Snapshot, error) {
	if peer, ok := p.peers[peer.ByteString()]; ok {
		// simulate response latency: waitDur is expressed in 100ms units
		time.Sleep(time.Duration(peer.waitDur) * time.Millisecond * 100)
		return peer.status, nil
	}
	return nil, errors.New("peer not found")
}
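
// A compile-time check, offered as a sketch: salud's status dependency is an
// unexported interface, so this anonymous interface mirrors the one method
// the mock is assumed to need to provide.
var _ interface {
	PeerSnapshot(ctx context.Context, peer swarm.Address) (*status.Snapshot, error)
} = (*statusMock)(nil)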