github.com/celestiaorg/celestia-node@v0.15.0-beta.1/share/availability/test/testing.go

package availability_test

import (
	"context"
	"testing"

	"github.com/ipfs/boxo/bitswap"
	"github.com/ipfs/boxo/bitswap/network"
	"github.com/ipfs/boxo/blockservice"
	"github.com/ipfs/boxo/blockstore"
	"github.com/ipfs/boxo/routing/offline"
	ds "github.com/ipfs/go-datastore"
	dssync "github.com/ipfs/go-datastore/sync"
	record "github.com/libp2p/go-libp2p-record"
	"github.com/libp2p/go-libp2p/core/host"
	"github.com/libp2p/go-libp2p/core/peer"
	mocknet "github.com/libp2p/go-libp2p/p2p/net/mock"
	"github.com/stretchr/testify/require"

	"github.com/celestiaorg/celestia-node/share"
	"github.com/celestiaorg/celestia-node/share/ipld"
	"github.com/celestiaorg/celestia-node/share/sharetest"
)

// RandFillBS fills the given BlockService with a random square of n*n shares
// and returns the resulting data root.
func RandFillBS(t *testing.T, n int, bServ blockservice.BlockService) *share.Root {
	shares := sharetest.RandShares(t, n*n)
	return FillBS(t, bServ, shares)
}

// FillBS fills the given BlockService with the given shares and returns the
// data root computed from them.
func FillBS(t *testing.T, bServ blockservice.BlockService, shares []share.Share) *share.Root {
	eds, err := ipld.AddShares(context.TODO(), shares, bServ)
	require.NoError(t, err)
	dah, err := share.NewRoot(eds)
	require.NoError(t, err)
	return dah
}
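
// The fillers above are typically combined with the TestDagNet helpers defined
// below to pre-populate a node before exercising availability. The sketch
// below is illustrative only: the function name and the 16-share square width
// are assumptions, not part of the original utilities.
func exampleFillNode(ctx context.Context, t *testing.T) *share.Root {
	dn := NewTestDAGNet(ctx, t)
	nd := dn.NewTestNode()

	// Seed the node with a random 16x16 square of shares and keep the data root
	// so a test can later sample or retrieve against it.
	root := RandFillBS(t, 16, nd.BlockService)
	require.NotNil(t, root)
	return root
}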

// TestNode is a test network node that bundles the share.Getter,
// share.Availability, BlockService and libp2p Host used by availability tests.
type TestNode struct {
	net *TestDagNet
	share.Getter
	share.Availability
	blockservice.BlockService
	host.Host
}

// ClearStorage cleans up the storage of the node.
func (n *TestNode) ClearStorage() {
	keys, err := n.Blockstore().AllKeysChan(n.net.ctx)
	require.NoError(n.net.T, err)

	for k := range keys {
		err := n.DeleteBlock(n.net.ctx, k)
		require.NoError(n.net.T, err)
	}
}

// TestDagNet is a testing swarm that tracks the spawned TestNodes and the mock
// network connecting them.
type TestDagNet struct {
	ctx   context.Context
	T     *testing.T
	net   mocknet.Mocknet
	nodes []*TestNode
}

// NewTestDAGNet creates a new testing swarm utility to spawn different nodes and test how they
// interact and/or exchange data.
func NewTestDAGNet(ctx context.Context, t *testing.T) *TestDagNet {
	return &TestDagNet{
		ctx: ctx,
		T:   t,
		net: mocknet.New(),
	}
}

// NewTestNodeWithBlockstore creates a new plain TestNode with the given blockstore that can serve
// and request data.
func (dn *TestDagNet) NewTestNodeWithBlockstore(dstore ds.Datastore, bstore blockstore.Blockstore) *TestNode {
	hst, err := dn.net.GenPeer()
	require.NoError(dn.T, err)
	routing := offline.NewOfflineRouter(dstore, record.NamespacedValidator{})
	bs := bitswap.New(
		dn.ctx,
		network.NewFromIpfsHost(hst, routing),
		bstore,
		bitswap.ProvideEnabled(false),          // disable routines for DHT content providing, as we don't use them
		bitswap.EngineBlockstoreWorkerCount(1), // otherwise it spawns 128 goroutines, which is too many for tests
		bitswap.EngineTaskWorkerCount(2),
		bitswap.TaskWorkerCount(2),
		bitswap.SetSimulateDontHavesOnTimeout(false),
		bitswap.SetSendDontHaves(false),
	)
	nd := &TestNode{
		net:          dn,
		BlockService: ipld.NewBlockservice(bstore, bs),
		Host:         hst,
	}
	dn.nodes = append(dn.nodes, nd)
	return nd
}

// NewTestNode creates a plain network node that can serve and request data.
func (dn *TestDagNet) NewTestNode() *TestNode {
	dstore := dssync.MutexWrap(ds.NewMapDatastore())
	bstore := blockstore.NewBlockstore(dstore)
	return dn.NewTestNodeWithBlockstore(dstore, bstore)
}

// ConnectAll connects all the peers registered on the TestDagNet.
func (dn *TestDagNet) ConnectAll() {
	err := dn.net.LinkAll()
	require.NoError(dn.T, err)

	err = dn.net.ConnectAllButSelf()
	require.NoError(dn.T, err)
}

// Connect connects two given peers.
func (dn *TestDagNet) Connect(peerA, peerB peer.ID) {
	_, err := dn.net.LinkPeers(peerA, peerB)
	require.NoError(dn.T, err)
	_, err = dn.net.ConnectPeers(peerA, peerB)
	require.NoError(dn.T, err)
}

// Disconnect disconnects two peers.
// It does a hard disconnect, meaning that disconnected peers won't be able to reconnect on their
// own; they can only be reconnected via TestDagNet.Connect or TestDagNet.ConnectAll.
func (dn *TestDagNet) Disconnect(peerA, peerB peer.ID) {
	err := dn.net.UnlinkPeers(peerA, peerB)
	require.NoError(dn.T, err)
	err = dn.net.DisconnectPeers(peerA, peerB)
	require.NoError(dn.T, err)
}
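
// Connect, Disconnect and ConnectAll are usually driven from a test roughly as
// sketched below. The function name and the two-node topology are assumptions
// for illustration only, not part of the original utilities.
func exampleConnectivity(ctx context.Context, t *testing.T) {
	dn := NewTestDAGNet(ctx, t)
	nd1 := dn.NewTestNode()
	nd2 := dn.NewTestNode()

	// Link and connect every registered peer to every other peer.
	dn.ConnectAll()

	// Hard-disconnect the pair; they stay apart until reconnected explicitly.
	dn.Disconnect(nd1.ID(), nd2.ID())

	// Re-link and reconnect just this pair.
	dn.Connect(nd1.ID(), nd2.ID())
}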

// SubNet is a subset of a TestDagNet's nodes that can be wired together
// independently of the rest of the swarm.
type SubNet struct {
	*TestDagNet
	nodes []*TestNode
}

// SubNet creates an empty SubNet on top of the TestDagNet.
func (dn *TestDagNet) SubNet() *SubNet {
	return &SubNet{dn, nil}
}

// AddNode registers the given node in the SubNet.
func (sn *SubNet) AddNode(nd *TestNode) {
	sn.nodes = append(sn.nodes, nd)
}

// ConnectAll connects every pair of distinct nodes registered in the SubNet.
func (sn *SubNet) ConnectAll() {
	nodes := sn.nodes
	for _, n1 := range nodes {
		for _, n2 := range nodes {
			if n1 == n2 {
				continue
			}
			_, err := sn.net.LinkPeers(n1.ID(), n2.ID())
			require.NoError(sn.T, err)

			_, err = sn.net.ConnectPeers(n1.ID(), n2.ID())
			require.NoError(sn.T, err)
		}
	}
}
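
// SubNet is handy for partitioning the swarm: only nodes added to a SubNet are
// wired together by its ConnectAll. A brief illustrative sketch; the function
// name and node roles are assumptions, not part of the original utilities.
func exampleSubNet(ctx context.Context, t *testing.T) {
	dn := NewTestDAGNet(ctx, t)
	full := dn.NewTestNode()
	light1 := dn.NewTestNode()
	light2 := dn.NewTestNode()

	// Connect only the full node with the first light node; light2 stays isolated.
	sub := dn.SubNet()
	sub.AddNode(full)
	sub.AddNode(light1)
	sub.ConnectAll()

	_ = light2 // intentionally left unconnected in this sketch
}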