github.com/celestiaorg/celestia-node@v0.15.0-beta.1/nodebuilder/tests/reconstruct_test.go

//go:build reconstruction || integration

package tests

import (
	"context"
	"os"
	"testing"
	"time"

	"github.com/libp2p/go-libp2p/core/event"
	"github.com/libp2p/go-libp2p/core/host"
	"github.com/libp2p/go-libp2p/core/peer"
	ma "github.com/multiformats/go-multiaddr"
	"github.com/stretchr/testify/require"
	"golang.org/x/sync/errgroup"

	"github.com/celestiaorg/celestia-node/nodebuilder"
	"github.com/celestiaorg/celestia-node/nodebuilder/node"
	"github.com/celestiaorg/celestia-node/nodebuilder/p2p"
	"github.com/celestiaorg/celestia-node/nodebuilder/tests/swamp"
	"github.com/celestiaorg/celestia-node/share/availability/light"
	"github.com/celestiaorg/celestia-node/share/eds"
)

/*
Test-Case: Full Node reconstructs blocks from a Bridge node
Pre-Reqs:
- First 20 blocks have a block size of 16
- Blocktime is 300 ms
Steps:
1. Create a Bridge Node (BN)
2. Start the BN
3. Create a Full Node (FN) with the BN as a trusted peer
4. Start the FN
5. Check that the FN can retrieve shares from blocks 1 to 20
*/
func TestFullReconstructFromBridge(t *testing.T) {
	const (
		blocks = 20
		bsize  = 16
		btime  = time.Millisecond * 300
	)

	ctx, cancel := context.WithTimeout(context.Background(), swamp.DefaultTestTimeout)
	t.Cleanup(cancel)
	sw := swamp.NewSwamp(t, swamp.WithBlockTime(btime))
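	// FillBlocks keeps submitting transactions so that `blocks` blocks of size `bsize`
	// are produced; the returned channel reports once filling is done (or the error that
	// stopped it) and is checked at the end of the test.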
	fillDn := swamp.FillBlocks(ctx, sw.ClientContext, sw.Accounts, bsize, blocks)

	bridge := sw.NewBridgeNode()
	err := bridge.Start(ctx)
	require.NoError(t, err)
	bridgeClient := getAdminClient(ctx, bridge, t)

	// TODO: This is required to avoid flakes coming from unfinished retry
	// mechanism for the same peer in go-header
	_, err = bridgeClient.Header.WaitForHeight(ctx, uint64(blocks))
	require.NoError(t, err)

	cfg := nodebuilder.DefaultConfig(node.Full)
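	// With the share exchange (shrex) switched off, the FN cannot request whole blocks
	// from the BN; it has to fetch individual shares and reconstruct each square itself,
	// which is the behaviour this test exercises.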
	cfg.Share.UseShareExchange = false
	cfg.Header.TrustedPeers = append(cfg.Header.TrustedPeers, getMultiAddr(t, bridge.Host))
	full := sw.NewNodeWithConfig(node.Full, cfg)
	err = full.Start(ctx)
	require.NoError(t, err)
	fullClient := getAdminClient(ctx, full, t)

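	// For each height, wait for its header to arrive and assert that the FN can make the
	// block's shares available, i.e. that it managed to retrieve and reconstruct the square.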
	errg, bctx := errgroup.WithContext(ctx)
	for i := 1; i <= blocks+1; i++ {
		i := i
		errg.Go(func() error {
			h, err := fullClient.Header.WaitForHeight(bctx, uint64(i))
			if err != nil {
				return err
			}

			return fullClient.Share.SharesAvailable(bctx, h)
		})
	}
	require.NoError(t, <-fillDn)
	require.NoError(t, errg.Wait())
}

/*
Test-Case: Full Nodes reconstruct blocks from each other after failing to retrieve the
complete block from their own LN subnetworks. Each FN is attached to one of two
disconnected LN subnetworks, so neither holds enough shares to reconstruct on its own;
reconstruction only succeeds once the two FNs are connected to each other.
Analogous to TestShareAvailable_DisconnectedFullNodes.
*/
func TestFullReconstructFromFulls(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}

	light.DefaultSampleAmount = 10 // s - samples per light node
	const (
		blocks = 10
		btime  = time.Millisecond * 300
		bsize  = 8  // k - block size
		lnodes = 12 // c - total number of light nodes across the two subnetworks
	)

	ctx, cancel := context.WithTimeout(context.Background(), swamp.DefaultTestTimeout)
	t.Cleanup(cancel)

	sw := swamp.NewSwamp(t, swamp.WithBlockTime(btime))
	fillDn := swamp.FillBlocks(ctx, sw.ClientContext, sw.Accounts, bsize, blocks)

	const defaultTimeInterval = time.Second * 5
	bridge := sw.NewBridgeNode()

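	// Make the bridge the swamp-wide bootstrapper so the light nodes created below can
	// discover it; the bootstrapper list is cleared again further down to keep the full
	// nodes away from the bridge.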
	sw.SetBootstrapper(t, bridge)
	require.NoError(t, bridge.Start(ctx))
	bridgeClient := getAdminClient(ctx, bridge, t)

	// TODO: This is required to avoid flakes coming from unfinished retry
	// mechanism for the same peer in go-header
	_, err := bridgeClient.Header.WaitForHeight(ctx, uint64(blocks))
	require.NoError(t, err)

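	// Spin up two equally sized groups of light nodes; further down they are partitioned
	// into two disconnected subnetworks, one per full node.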
	lights1 := make([]*nodebuilder.Node, lnodes/2)
	lights2 := make([]*nodebuilder.Node, lnodes/2)
	subs := make([]event.Subscription, lnodes)
	errg, errCtx := errgroup.WithContext(ctx)
	for i := 0; i < lnodes/2; i++ {
		i := i
		errg.Go(func() error {
			lnConfig := nodebuilder.DefaultConfig(node.Light)
			setTimeInterval(lnConfig, defaultTimeInterval)
			light := sw.NewNodeWithConfig(node.Light, lnConfig)
			sub, err := light.Host.EventBus().Subscribe(&event.EvtPeerIdentificationCompleted{})
			if err != nil {
				return err
			}
			subs[i] = sub
			lights1[i] = light
			return light.Start(errCtx)
		})
		errg.Go(func() error {
			lnConfig := nodebuilder.DefaultConfig(node.Light)
			setTimeInterval(lnConfig, defaultTimeInterval)
			light := sw.NewNodeWithConfig(node.Light, lnConfig)
			sub, err := light.Host.EventBus().Subscribe(&event.EvtPeerIdentificationCompleted{})
			if err != nil {
				return err
			}
			subs[(lnodes/2)+i] = sub
			lights2[i] = light
			return light.Start(errCtx)
		})
	}

	require.NoError(t, errg.Wait())

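	// Wait until every light node has completed libp2p identification with at least one
	// peer before partitioning the network below.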
	for i := 0; i < lnodes; i++ {
		select {
		case <-ctx.Done():
			t.Fatal("light node did not complete peer identification in time")
		case <-subs[i].Out():
			require.NoError(t, subs[i].Close())
			continue
		}
	}

	// Remove bootstrappers to prevent FNs from connecting to bridge
	sw.Bootstrappers = []ma.Multiaddr{}
	// Use light nodes from respective subnetworks as bootstrappers to prevent connection to bridge
	lnBootstrapper1, err := peer.AddrInfoToP2pAddrs(host.InfoFromHost(lights1[0].Host))
	require.NoError(t, err)
	lnBootstrapper2, err := peer.AddrInfoToP2pAddrs(host.InfoFromHost(lights2[0].Host))
	require.NoError(t, err)

	cfg := nodebuilder.DefaultConfig(node.Full)
	setTimeInterval(cfg, defaultTimeInterval)
	cfg.Share.UseShareExchange = false
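	// A discovery peers limit of 0 keeps discovery from adding full-node peers on its own,
	// so the FNs only ever talk to the peers this test connects explicitly.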
	cfg.Share.Discovery.PeersLimit = 0
	cfg.Header.TrustedPeers = []string{lnBootstrapper1[0].String()}
	full1 := sw.NewNodeWithConfig(node.Full, cfg)
	cfg.Header.TrustedPeers = []string{lnBootstrapper2[0].String()}
	full2 := sw.NewNodeWithConfig(node.Full, cfg)
	require.NoError(t, full1.Start(ctx))
	require.NoError(t, full2.Start(ctx))

	fullClient1 := getAdminClient(ctx, full1, t)
	fullClient2 := getAdminClient(ctx, full2, t)

	// Form topology
	for i := 0; i < lnodes/2; i++ {
		// Separate light nodes into two subnetworks
		for j := 0; j < lnodes/2; j++ {
			sw.Disconnect(t, lights1[i], lights2[j])
			if i != j {
				sw.Connect(t, lights1[i], lights1[j])
				sw.Connect(t, lights2[i], lights2[j])
			}
		}

		sw.Connect(t, full1, lights1[i])
		sw.Disconnect(t, full1, lights2[i])

		sw.Connect(t, full2, lights2[i])
		sw.Disconnect(t, full2, lights1[i])
	}

	// Ensure the FNs are connected neither to each other nor to the bridge
	sw.Disconnect(t, full1, full2)
	sw.Disconnect(t, full1, bridge)
	sw.Disconnect(t, full2, bridge)

	h, err := fullClient1.Header.WaitForHeight(ctx, uint64(10+blocks-1))
	require.NoError(t, err)

	// Ensure that the full nodes cannot reconstruct the block before being connected to each other
	ctxErr, cancelErr := context.WithTimeout(ctx, time.Second*30)
	errg, errCtx = errgroup.WithContext(ctxErr)
	errg.Go(func() error {
		return fullClient1.Share.SharesAvailable(errCtx, h)
	})
	errg.Go(func() error {
		return fullClient2.Share.SharesAvailable(errCtx, h)
	})
	require.Error(t, errg.Wait())
	cancelErr()

	// Reconnect FNs
	sw.Connect(t, full1, full2)

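	// With the two FNs connected, the shares sampled by both LN subnetworks are reachable
	// from either FN, so reconstruction of the filled blocks should now succeed.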
	errg, bctx := errgroup.WithContext(ctx)
	for i := 10; i < blocks+11; i++ {
		h, err := fullClient1.Header.WaitForHeight(bctx, uint64(i))
		require.NoError(t, err)
		errg.Go(func() error {
			return fullClient1.Share.SharesAvailable(bctx, h)
		})
		errg.Go(func() error {
			return fullClient2.Share.SharesAvailable(bctx, h)
		})
	}

	require.NoError(t, <-fillDn)
	require.NoError(t, errg.Wait())
}

/*
Test-Case: Full Node reconstructs blocks only from Light Nodes
Pre-Reqs:
- First 20 blocks have a block size of 16
- Blocktime is 300 ms
Steps:
1. Create a Bridge Node (BN)
2. Start the BN
3. Create a Full Node (FN) that will act as a bootstrapper
4. Create 69 Light Nodes (LNs) with the BN as a trusted peer and the FN as a bootstrapper
5. Start the 69 LNs
6. Create a Full Node (FN) with the same bootstrapper
7. Unlink the FN's connection to the BN
8. Start the FN
9. Check that the FN can retrieve shares from blocks 1 to 20
*/
func TestFullReconstructFromLights(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}

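	// Shorten the retriever's per-quadrant timeout so reconstruction falls back to
	// requesting additional quadrants quickly, and bump the LNs' sample amount so that,
	// across 69 light nodes, enough distinct shares are sampled for the FN to reconstruct
	// from light nodes alone.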
	eds.RetrieveQuadrantTimeout = time.Millisecond * 100
	light.DefaultSampleAmount = 20
	const (
		blocks = 20
		btime  = time.Millisecond * 300
		bsize  = 16
		lnodes = 69
	)

	ctx, cancel := context.WithTimeout(context.Background(), swamp.DefaultTestTimeout)
	t.Cleanup(cancel)
	sw := swamp.NewSwamp(t, swamp.WithBlockTime(btime))
	fillDn := swamp.FillBlocks(ctx, sw.ClientContext, sw.Accounts, bsize, blocks)

	const defaultTimeInterval = time.Second * 5
	cfg := nodebuilder.DefaultConfig(node.Full)
	setTimeInterval(cfg, defaultTimeInterval)

	bridge := sw.NewBridgeNode()
	addrsBridge, err := peer.AddrInfoToP2pAddrs(host.InfoFromHost(bridge.Host))
	require.NoError(t, err)

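	// Nodes constructed while this env var is "true" run in bootstrapper mode; it is
	// flipped back to "false" below before the light nodes are created.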
	os.Setenv(p2p.EnvKeyCelestiaBootstrapper, "true")
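	// The trusted peer below is a dummy, unreachable address; the bootstrapper FN is not
	// expected to sync headers from it, the entry presumably just needs to be well-formed.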
	cfg.Header.TrustedPeers = []string{
		"/ip4/1.2.3.4/tcp/12345/p2p/12D3KooWNaJ1y1Yio3fFJEXCZyd1Cat3jmrPdgkYCrHfKD3Ce21p",
	}
	bootstrapper := sw.NewNodeWithConfig(node.Full, cfg)
	require.NoError(t, bootstrapper.Start(ctx))
	bootstrapperAddr := host.InfoFromHost(bootstrapper.Host)

	require.NoError(t, bridge.Start(ctx))
	bridgeClient := getAdminClient(ctx, bridge, t)

	// TODO: This is required to avoid flakes coming from unfinished retry
	// mechanism for the same peer in go-header
	_, err = bridgeClient.Header.WaitForHeight(ctx, uint64(blocks))
	require.NoError(t, err)

	cfg = nodebuilder.DefaultConfig(node.Full)
	setTimeInterval(cfg, defaultTimeInterval)
	cfg.Share.UseShareExchange = false
	cfg.Header.TrustedPeers = append(cfg.Header.TrustedPeers, addrsBridge[0].String())
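	// Both the reconstructing FN and all LNs share the dedicated FN bootstrapper, so the
	// FN discovers the light nodes through it rather than through the bridge.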
	nodesConfig := nodebuilder.WithBootstrappers([]peer.AddrInfo{*bootstrapperAddr})
	full := sw.NewNodeWithConfig(node.Full, cfg, nodesConfig)
	os.Setenv(p2p.EnvKeyCelestiaBootstrapper, "false")

	lights := make([]*nodebuilder.Node, lnodes)
	subs := make([]event.Subscription, lnodes)
	errg, errCtx := errgroup.WithContext(ctx)
	for i := 0; i < lnodes; i++ {
		i := i
		errg.Go(func() error {
			lnConfig := nodebuilder.DefaultConfig(node.Light)
			setTimeInterval(lnConfig, defaultTimeInterval)
			lnConfig.Header.TrustedPeers = append(lnConfig.Header.TrustedPeers, addrsBridge[0].String())
			light := sw.NewNodeWithConfig(node.Light, lnConfig, nodesConfig)
			sub, err := light.Host.EventBus().Subscribe(&event.EvtPeerIdentificationCompleted{})
			if err != nil {
				return err
			}
			subs[i] = sub
			lights[i] = light
			return light.Start(errCtx)
		})
	}

	require.NoError(t, errg.Wait())
	require.NoError(t, full.Start(ctx))
	fullClient := getAdminClient(ctx, full, t)

	for i := 0; i < lnodes; i++ {
		select {
		case <-ctx.Done():
			t.Fatal("light node did not complete peer identification in time")
		case <-subs[i].Out():
			require.NoError(t, subs[i].Close())
			continue
		}
	}
	errg, bctx := errgroup.WithContext(ctx)
	for i := 1; i <= blocks+1; i++ {
		i := i
		errg.Go(func() error {
			h, err := fullClient.Header.WaitForHeight(bctx, uint64(i))
			if err != nil {
				return err
			}

			return fullClient.Share.SharesAvailable(bctx, h)
		})
	}
	require.NoError(t, <-fillDn)
	require.NoError(t, errg.Wait())
}

func getMultiAddr(t *testing.T, h host.Host) string {
	addrs, err := peer.AddrInfoToP2pAddrs(host.InfoFromHost(h))
	require.NoError(t, err)
	return addrs[0].String()
}