github.com/celestiaorg/celestia-node@v0.15.0-beta.1/nodebuilder/tests/nd_test.go

//go:build nd || integration

package tests

import (
	"context"
	"testing"
	"time"

	"github.com/libp2p/go-libp2p/core/host"
	"github.com/libp2p/go-libp2p/core/network"
	"github.com/stretchr/testify/require"
	"go.uber.org/fx"

	"github.com/celestiaorg/celestia-node/nodebuilder"
	"github.com/celestiaorg/celestia-node/nodebuilder/node"
	"github.com/celestiaorg/celestia-node/nodebuilder/p2p"
	"github.com/celestiaorg/celestia-node/nodebuilder/tests/swamp"
	"github.com/celestiaorg/celestia-node/share"
	"github.com/celestiaorg/celestia-node/share/eds"
	"github.com/celestiaorg/celestia-node/share/getters"
	"github.com/celestiaorg/celestia-node/share/p2p/shrexnd"
)

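// TestShrexNDFromLights tests that a light node can request namespaced shares
// from a bridge node over shrex-nd and receive the same data the bridge
// itself reports for that namespace.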
func TestShrexNDFromLights(t *testing.T) {
	const (
		// 20 blocks are produced so that the verification loop below, which
		// starts at height 16, actually has filled blocks to check
		blocks = 20
		btime  = time.Millisecond * 300
		bsize  = 16
	)

	ctx, cancel := context.WithTimeout(context.Background(), swamp.DefaultTestTimeout)
	t.Cleanup(cancel)

	sw := swamp.NewSwamp(t, swamp.WithBlockTime(btime))
	fillDn := swamp.FillBlocks(ctx, sw.ClientContext, sw.Accounts, bsize, blocks)

	bridge := sw.NewBridgeNode()
	sw.SetBootstrapper(t, bridge)

	cfg := nodebuilder.DefaultConfig(node.Light)
	cfg.Share.Discovery.PeersLimit = 1
	light := sw.NewNodeWithConfig(node.Light, cfg)

	err := bridge.Start(ctx)
	require.NoError(t, err)
	err = light.Start(ctx)
	require.NoError(t, err)

	bridgeClient := getAdminClient(ctx, bridge, t)
	lightClient := getAdminClient(ctx, light, t)

	// wait for the chain to be filled
	require.NoError(t, <-fillDn)

	// the first 15 blocks are not filled with data
	//
	// TODO: we need to stop guessing the block that actually has
	// transactions. We can get this data from the response returned by
	// FillBlock.
	for i := 16; i < blocks; i++ {
		h, err := bridgeClient.Header.GetByHeight(ctx, uint64(i))
		require.NoError(t, err)

		reqCtx, cancel := context.WithTimeout(ctx, time.Second*5)

		// fetch a namespace that is actually present in the block and not
		// reserved: the first share.NamespaceSize bytes of a row root encode
		// that row's minimum namespace, and the second row starts with blob
		// data rather than reserved transaction namespaces
		namespace := h.DAH.RowRoots[1][:share.NamespaceSize]

		expected, err := bridgeClient.Share.GetSharesByNamespace(reqCtx, h, namespace)
		require.NoError(t, err)
		got, err := lightClient.Share.GetSharesByNamespace(reqCtx, h, namespace)
		require.NoError(t, err)

		require.True(t, len(got[0].Shares) > 0)
		require.Equal(t, expected, got)

		cancel()
	}
}

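// TestShrexNDFromLightsWithBadFulls runs the same namespaced-share check as
// TestShrexNDFromLights, but surrounds the light node with full nodes whose
// shrex-nd servers reset every incoming stream. The light node must tolerate
// these bad peers and still fetch correct data via the bridge.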
func TestShrexNDFromLightsWithBadFulls(t *testing.T) {
	const (
		blocks        = 10
		btime         = time.Millisecond * 300
		bsize         = 16
		amountOfFulls = 5
		testTimeout   = time.Second * 10
	)

	ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
	t.Cleanup(cancel)

	sw := swamp.NewSwamp(t, swamp.WithBlockTime(btime))
	fillDn := swamp.FillBlocks(ctx, sw.ClientContext, sw.Accounts, bsize, blocks)

	bridge := sw.NewBridgeNode()
	sw.SetBootstrapper(t, bridge)

	// create full nodes whose shrex-nd handler simply resets every incoming
	// stream, simulating peers that accept connections but never serve data
	ndHandler := func(stream network.Stream) {
		_ = stream.Reset()
	}
	fulls := make([]*nodebuilder.Node, 0, amountOfFulls)
	for i := 0; i < amountOfFulls; i++ {
		cfg := nodebuilder.DefaultConfig(node.Full)
		setTimeInterval(cfg, testTimeout)
		full := sw.NewNodeWithConfig(node.Full, cfg, replaceNDServer(cfg, ndHandler), replaceShareGetter())
		fulls = append(fulls, full)
	}

	lnConfig := nodebuilder.DefaultConfig(node.Light)
	lnConfig.Share.Discovery.PeersLimit = uint(amountOfFulls)
	light := sw.NewNodeWithConfig(node.Light, lnConfig)

	// start all nodes
	require.NoError(t, bridge.Start(ctx))
	require.NoError(t, startFullNodes(ctx, fulls...))
	require.NoError(t, light.Start(ctx))

	bridgeClient := getAdminClient(ctx, bridge, t)
	lightClient := getAdminClient(ctx, light, t)

	// wait for the chain to be filled
	require.NoError(t, <-fillDn)

	// the first 2 blocks are not filled with data
	for i := 3; i < blocks; i++ {
		h, err := bridgeClient.Header.GetByHeight(ctx, uint64(i))
		require.NoError(t, err)

		if len(h.DAH.RowRoots) != bsize*2 {
			// FillBlocks does not always fill every block to the given block
			// size; this check prevents trying to fetch shares for the parity
			// namespace
			continue
		}

		reqCtx, cancel := context.WithTimeout(ctx, time.Second*5)

		// fetch a namespace that is actually present in the block and not
		// reserved (see TestShrexNDFromLights)
		namespace := h.DAH.RowRoots[1][:share.NamespaceSize]

		expected, err := bridgeClient.Share.GetSharesByNamespace(reqCtx, h, namespace)
		require.NoError(t, err)
		require.True(t, len(expected[0].Shares) > 0)

		// pick one of the fulls to test; which one does not matter, as they
		// all reset incoming shrex-nd streams
		fN := fulls[len(fulls)/2]
		fnClient := getAdminClient(ctx, fN, t)
		gotFull, err := fnClient.Share.GetSharesByNamespace(reqCtx, h, namespace)
		require.NoError(t, err)
		require.True(t, len(gotFull[0].Shares) > 0)

		gotLight, err := lightClient.Share.GetSharesByNamespace(reqCtx, h, namespace)
		require.NoError(t, err)
		require.True(t, len(gotLight[0].Shares) > 0)

		require.Equal(t, expected, gotFull)
		require.Equal(t, expected, gotLight)

		cancel()
	}
}

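// startFullNodes starts the given full nodes in order, returning the first
// error encountered.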
func startFullNodes(ctx context.Context, fulls ...*nodebuilder.Node) error {
	for _, full := range fulls {
		err := full.Start(ctx)
		if err != nil {
			return err
		}
	}
	return nil
}

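// replaceNDServer returns an fx option that decorates the node's
// shrexnd.Server with one whose request handler is replaced by the given
// stream handler, allowing tests to simulate misbehaving servers.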
func replaceNDServer(cfg *nodebuilder.Config, handler network.StreamHandler) fx.Option {
	return fx.Decorate(fx.Annotate(
		func(
			host host.Host,
			store *eds.Store,
			network p2p.Network,
		) (*shrexnd.Server, error) {
			cfg.Share.ShrExNDParams.WithNetworkID(network.String())
			return shrexnd.NewServer(cfg.Share.ShrExNDParams, host, store)
		},
		fx.OnStart(func(ctx context.Context, server *shrexnd.Server) error {
			// swap in the custom handler before starting the server
			server.SetHandler(handler)
			return server.Start(ctx)
		}),
		fx.OnStop(func(ctx context.Context, server *shrexnd.Server) error {
			return server.Stop(ctx)
		}),
	))
}

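// replaceShareGetter returns an fx option that narrows the node's
// share.Getter to a cascade of only the store and shrex getters, removing any
// other retrieval fallback so requests must be served from the local store or
// over shrex.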
func replaceShareGetter() fx.Option {
	return fx.Decorate(fx.Annotate(
		func(
			host host.Host,
			store *eds.Store,
			storeGetter *getters.StoreGetter,
			shrexGetter *getters.ShrexGetter,
			network p2p.Network,
		) share.Getter {
			cascade := []share.Getter{storeGetter, shrexGetter}
			return getters.NewCascadeGetter(cascade)
		},
	))
}