github.com/badrootd/celestia-core@v0.0.0-20240305091328-aa4207a4b25d/mempool/cat/requests_test.go

package cat

import (
	"fmt"
	"sync"
	"testing"
	"time"

	"github.com/badrootd/celestia-core/types"
	"github.com/fortytw2/leaktest"
	"github.com/stretchr/testify/require"
)

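// TestRequestSchedulerRerequest exercises the re-request path: a transaction is
// requested from peerA, the request times out and is re-issued to peerB, and
// responses from both peers are still accepted afterwards.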
func TestRequestSchedulerRerequest(t *testing.T) {
	var (
		requests        = newRequestScheduler(10*time.Millisecond, 1*time.Minute)
		tx              = types.Tx("tx")
		key             = tx.Key()
		peerA    uint16 = 1 // should be non-zero
		peerB    uint16 = 2
	)
	t.Cleanup(requests.Close)

	// check zero state
	require.Zero(t, requests.ForTx(key))
	require.False(t, requests.Has(peerA, key))
	// marking a tx that was never requested should return false
	require.False(t, requests.MarkReceived(peerA, key))

	// create a request
	closeCh := make(chan struct{})
	require.True(t, requests.Add(key, peerA, func(timedOutKey types.TxKey) {
		// the callback should receive the key of the timed-out request
		require.Equal(t, key, timedOutKey)
		// the first peer failed to respond in time, so we ask the second peer
		require.True(t, requests.Add(key, peerB, func(types.TxKey) {
			t.Fatal("did not expect to timeout")
		}))
		close(closeCh)
	}))

	// check that the request was added
	require.Equal(t, peerA, requests.ForTx(key))
	require.True(t, requests.Has(peerA, key))

	// should not be able to add the same request again
	require.False(t, requests.Add(key, peerA, nil))

	// wait for the scheduler to invoke the timeout
	<-closeCh

	// check that the request still exists
	require.True(t, requests.Has(peerA, key))
	// check that peerB was requested
	require.True(t, requests.Has(peerB, key))

	// There should still be a request for the Tx
	require.Equal(t, peerB, requests.ForTx(key))

	// record a response from peerB
	require.True(t, requests.MarkReceived(peerB, key))

	// peerA comes in later with a response but it's still
	// considered a response from an earlier request
	require.True(t, requests.MarkReceived(peerA, key))
}

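// TestRequestSchedulerNonResponsivePeer verifies that a request to a peer that
// never responds is eventually dropped, so ForTx returns zero again.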
func TestRequestSchedulerNonResponsivePeer(t *testing.T) {
	var (
		requests        = newRequestScheduler(10*time.Millisecond, time.Millisecond)
		tx              = types.Tx("tx")
		key             = tx.Key()
		peerA    uint16 = 1 // should be non-zero
	)
	t.Cleanup(requests.Close)

	require.True(t, requests.Add(key, peerA, nil))
	require.Eventually(t, func() bool {
		return requests.ForTx(key) == 0
	}, 100*time.Millisecond, 5*time.Millisecond)
}

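// TestRequestSchedulerConcurrencyAddsAndReads hammers the scheduler with
// concurrent Add, Has and MarkReceived calls across many peers, and checks
// that no requests are left outstanding and no goroutines are leaked.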
func TestRequestSchedulerConcurrencyAddsAndReads(t *testing.T) {
	leaktest.CheckTimeout(t, time.Second)()
	requests := newRequestScheduler(10*time.Millisecond, time.Millisecond)
	defer requests.Close()

	N := 5
	keys := make([]types.TxKey, N)
	for i := 0; i < N; i++ {
		tx := types.Tx(fmt.Sprintf("tx%d", i))
		keys[i] = tx.Key()
	}

	addWg := sync.WaitGroup{}
	receiveWg := sync.WaitGroup{}
	doneCh := make(chan struct{})
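	// spawn one goroutine per peer, each adding a request for one of the keys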
	for i := 1; i < N*N; i++ {
		addWg.Add(1)
		go func(peer uint16) {
			defer addWg.Done()
			requests.Add(keys[int(peer)%N], peer, nil)
		}(uint16(i))
	}
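	// spawn a matching goroutine per peer that keeps marking any of its
	// outstanding requests as received until all adds have finished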
	for i := 1; i < N*N; i++ {
		receiveWg.Add(1)
		go func(peer uint16) {
			defer receiveWg.Done()
			markReceived := func() {
				for _, key := range keys {
					if requests.Has(peer, key) {
						requests.MarkReceived(peer, key)
					}
				}
			}
			for {
				select {
				case <-doneCh:
					// need to ensure this is run
					// at least once after all adds
					// are done
					markReceived()
					return
				default:
					markReceived()
				}
			}
		}(uint16(i))
	}
	addWg.Wait()
	close(doneCh)

	receiveWg.Wait()

	for _, key := range keys {
		require.Zero(t, requests.ForTx(key))
	}
}