github.com/celestiaorg/celestia-node@v0.15.0-beta.1/share/p2p/shrexeds/exchange_test.go

package shrexeds

import (
	"context"
	"sync"
	"testing"
	"time"

	"github.com/ipfs/go-datastore"
	ds_sync "github.com/ipfs/go-datastore/sync"
	libhost "github.com/libp2p/go-libp2p/core/host"
	"github.com/libp2p/go-libp2p/core/network"
	mocknet "github.com/libp2p/go-libp2p/p2p/net/mock"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/celestiaorg/celestia-node/share"
	"github.com/celestiaorg/celestia-node/share/eds"
	"github.com/celestiaorg/celestia-node/share/eds/edstest"
	"github.com/celestiaorg/celestia-node/share/p2p"
)

func TestExchange_RequestEDS(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	t.Cleanup(cancel)
	store, client, server := makeExchange(t)

	err := store.Start(ctx)
	require.NoError(t, err)

	err = server.Start(ctx)
	require.NoError(t, err)

	// Testcase: EDS is immediately available
	t.Run("EDS_Available", func(t *testing.T) {
		eds := edstest.RandEDS(t, 4)
		dah, err := share.NewRoot(eds)
		require.NoError(t, err)
		err = store.Put(ctx, dah.Hash(), eds)
		require.NoError(t, err)

		requestedEDS, err := client.RequestEDS(ctx, dah.Hash(), server.host.ID())
		assert.NoError(t, err)
		assert.Equal(t, eds.Flattened(), requestedEDS.Flattened())
	})

	// Testcase: EDS is unavailable initially, but is found after multiple requests
	t.Run("EDS_AvailableAfterDelay", func(t *testing.T) {
		eds := edstest.RandEDS(t, 4)
		dah, err := share.NewRoot(eds)
		require.NoError(t, err)

		lock := make(chan struct{})
		go func() {
			<-lock
			err = store.Put(ctx, dah.Hash(), eds)
			require.NoError(t, err)
			lock <- struct{}{}
		}()

		requestedEDS, err := client.RequestEDS(ctx, dah.Hash(), server.host.ID())
		assert.ErrorIs(t, err, p2p.ErrNotFound)
		assert.Nil(t, requestedEDS)

		// unlock write
		lock <- struct{}{}
		// wait for write to finish
		<-lock

		requestedEDS, err = client.RequestEDS(ctx, dah.Hash(), server.host.ID())
		assert.NoError(t, err)
		assert.Equal(t, eds.Flattened(), requestedEDS.Flattened())
	})

	// Testcase: Invalid request excludes peer from round-robin, stopping request
	t.Run("EDS_InvalidRequest", func(t *testing.T) {
		dataHash := []byte("invalid")
		requestedEDS, err := client.RequestEDS(ctx, dataHash, server.host.ID())
		assert.ErrorContains(t, err, "stream reset")
		assert.Nil(t, requestedEDS)
	})

	t.Run("EDS_err_not_found", func(t *testing.T) {
		timeoutCtx, cancel := context.WithTimeout(ctx, time.Second)
		t.Cleanup(cancel)
		eds := edstest.RandEDS(t, 4)
		dah, err := share.NewRoot(eds)
		require.NoError(t, err)
		_, err = client.RequestEDS(timeoutCtx, dah.Hash(), server.host.ID())
		require.ErrorIs(t, err, p2p.ErrNotFound)
	})

	// Testcase: Concurrency limit reached
	t.Run("EDS_concurrency_limit", func(t *testing.T) {
		store, client, server := makeExchange(t)

		require.NoError(t, store.Start(ctx))
		require.NoError(t, server.Start(ctx))

		ctx, cancel := context.WithTimeout(ctx, time.Second)
		t.Cleanup(cancel)

		rateLimit := 2
		wg := sync.WaitGroup{}
		wg.Add(rateLimit)

		// mockHandler will block requests on server side until test is over
		lock := make(chan struct{})
		defer close(lock)
		mockHandler := func(network.Stream) {
			wg.Done()
			select {
			case <-lock:
			case <-ctx.Done():
				t.Fatal("timeout")
			}
		}
		middleware := p2p.NewMiddleware(rateLimit)
		server.host.SetStreamHandler(server.protocolID,
			middleware.RateLimitHandler(mockHandler))

		// take server concurrency slots with blocked requests
		for i := 0; i < rateLimit; i++ {
			go func(i int) {
				client.RequestEDS(ctx, nil, server.host.ID()) //nolint:errcheck
			}(i)
		}

		// wait until all server slots are taken
		wg.Wait()
		_, err = client.RequestEDS(ctx, nil, server.host.ID())
		require.ErrorIs(t, err, p2p.ErrNotFound)
	})
}

func newStore(t *testing.T) *eds.Store {
	t.Helper()

	storeCfg := eds.DefaultParameters()
	ds := ds_sync.MutexWrap(datastore.NewMapDatastore())
	store, err := eds.NewStore(storeCfg, t.TempDir(), ds)
	require.NoError(t, err)
	return store
}

func createMocknet(t *testing.T, amount int) []libhost.Host {
	t.Helper()

	net, err := mocknet.FullMeshConnected(amount)
	require.NoError(t, err)
	// get host and peer
	return net.Hosts()
}

func makeExchange(t *testing.T) (*eds.Store, *Client, *Server) {
	t.Helper()
	store := newStore(t)
	hosts := createMocknet(t, 2)

	client, err := NewClient(DefaultParameters(), hosts[0])
	require.NoError(t, err)
	server, err := NewServer(DefaultParameters(), hosts[1], store)
	require.NoError(t, err)

	return store, client, server
}