github.com/status-im/status-go@v1.1.0/wakuv2/waku_test.go

package wakuv2

import (
	"context"
	"crypto/rand"
	"encoding/json"
	"errors"
	"math/big"
	"os"
	"sync"
	"testing"
	"time"

	"go.uber.org/zap"

	"github.com/cenkalti/backoff/v3"
	"github.com/libp2p/go-libp2p/core/metrics"
	"github.com/libp2p/go-libp2p/core/peer"
	libp2pprotocol "github.com/libp2p/go-libp2p/core/protocol"

	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/crypto"
	ethdnsdisc "github.com/ethereum/go-ethereum/p2p/dnsdisc"
	"github.com/ethereum/go-ethereum/p2p/enode"

	"github.com/stretchr/testify/require"
	"golang.org/x/exp/maps"
	"google.golang.org/protobuf/proto"

	"github.com/waku-org/go-waku/waku/v2/dnsdisc"
	wps "github.com/waku-org/go-waku/waku/v2/peerstore"
	"github.com/waku-org/go-waku/waku/v2/protocol"
	"github.com/waku-org/go-waku/waku/v2/protocol/filter"
	"github.com/waku-org/go-waku/waku/v2/protocol/legacy_store"
	"github.com/waku-org/go-waku/waku/v2/protocol/lightpush"
	"github.com/waku-org/go-waku/waku/v2/protocol/pb"
	"github.com/waku-org/go-waku/waku/v2/protocol/relay"
	"github.com/waku-org/go-waku/waku/v2/protocol/store"

	"github.com/status-im/status-go/appdatabase"
	"github.com/status-im/status-go/connection"
	"github.com/status-im/status-go/eth-node/types"
	"github.com/status-im/status-go/protocol/tt"
	"github.com/status-im/status-go/t/helpers"
	"github.com/status-im/status-go/wakuv2/common"
)

var testStoreENRBootstrap = "enrtree://AI4W5N5IFEUIHF5LESUAOSMV6TKWF2MB6GU2YK7PU4TYUGUNOCEPW@store.staging.status.nodes.status.im"
var testBootENRBootstrap = "enrtree://AMOJVZX4V6EXP7NTJPMAYJYST2QP6AJXYW76IU6VGJS7UVSNDYZG4@boot.staging.status.nodes.status.im"

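// setDefaultConfig configures a test node on cluster 16 either as a light
// client (peer-exchange client, no discv5) or as a full relay node (discv5
// plus peer-exchange server).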
func setDefaultConfig(config *Config, lightMode bool) {
	config.ClusterID = 16

	if lightMode {
		config.EnablePeerExchangeClient = true
		config.LightClient = true
		config.EnableDiscV5 = false
	} else {
		config.EnableDiscV5 = true
		config.EnablePeerExchangeServer = true
		config.LightClient = false
		config.EnablePeerExchangeClient = false
	}
}

func TestDiscoveryV5(t *testing.T) {
	config := &Config{}
	setDefaultConfig(config, false)
	config.DiscV5BootstrapNodes = []string{testStoreENRBootstrap}
	config.DiscoveryLimit = 20
	w, err := New(nil, "shards.staging", config, nil, nil, nil, nil, nil)
	require.NoError(t, err)

	require.NoError(t, w.Start())

	err = tt.RetryWithBackOff(func() error {
		if len(w.Peers()) == 0 {
			return errors.New("no peers discovered")
		}
		return nil
	})

	require.NoError(t, err)

	require.NotEqual(t, 0, len(w.Peers()))
	require.NoError(t, w.Stop())
}

func TestRestartDiscoveryV5(t *testing.T) {
	config := &Config{}
	setDefaultConfig(config, false)
	// Use a wrong discv5 bootstrap address to simulate being offline
	config.DiscV5BootstrapNodes = []string{"enrtree://AOGECG2SPND25EEFMAJ5WF3KSGJNSGV356DSTL2YVLLZWIV6SAYBM@1.1.1.2"}
	config.DiscoveryLimit = 20
	config.UDPPort = 10002
	config.ClusterID = 16
	w, err := New(nil, "", config, nil, nil, nil, nil, nil)
	require.NoError(t, err)

	require.NoError(t, w.Start())
	require.False(t, w.seededBootnodesForDiscV5)

	options := func(b *backoff.ExponentialBackOff) {
		b.MaxElapsedTime = 2 * time.Second
	}

	// Sanity check: no peers should be discovered while "offline"
	err = tt.RetryWithBackOff(func() error {
		if len(w.Peers()) == 0 {
			return errors.New("no peers discovered")
		}
		return nil
	}, options)

	require.Error(t, err)

	// Switch to a valid bootstrap address; discovery should recover
	w.discV5BootstrapNodes = []string{testStoreENRBootstrap}

	options = func(b *backoff.ExponentialBackOff) {
		b.MaxElapsedTime = 90 * time.Second
	}

	err = tt.RetryWithBackOff(func() error {
		if len(w.Peers()) == 0 {
			return errors.New("no peers discovered")
		}
		return nil
	}, options)
	require.NoError(t, err)

	require.True(t, w.seededBootnodesForDiscV5)
	require.NotEqual(t, 0, len(w.Peers()))
	require.NoError(t, w.Stop())
}

func TestRelayPeers(t *testing.T) {
	config := &Config{
		EnableMissingMessageVerification: true,
	}
	setDefaultConfig(config, false)
	w, err := New(nil, "", config, nil, nil, nil, nil, nil)
	require.NoError(t, err)
	require.NoError(t, w.Start())
	_, err = w.RelayPeersByTopic(config.DefaultShardPubsubTopic)
	require.NoError(t, err)
	require.NoError(t, w.Stop())

	// Ensure the function returns an error for a light client
	config = &Config{}
	config.ClusterID = 16
	config.LightClient = true
	w, err = New(nil, "", config, nil, nil, nil, nil, nil)
	require.NoError(t, err)
	require.NoError(t, w.Start())
	_, err = w.RelayPeersByTopic(config.DefaultShardPubsubTopic)
	require.Error(t, err)
	require.NoError(t, w.Stop())
}

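// parseNodes decodes the given node records, panicking on any invalid record.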
func parseNodes(rec []string) []*enode.Node {
	var ns []*enode.Node
	for _, r := range rec {
		var n enode.Node
		if err := n.UnmarshalText([]byte(r)); err != nil {
			panic(err)
		}
		ns = append(ns, &n)
	}
	return ns
}

// To run these tests, you must have an nwaku node running.
//
// Using Docker:
//
//	IP_ADDRESS=$(hostname -I | awk '{print $1}');
//	docker run \
//	 -p 60000:60000/tcp -p 9000:9000/udp -p 8645:8645/tcp harbor.status.im/wakuorg/nwaku:v0.31.0 \
//	 --tcp-port=60000 --discv5-discovery=true --cluster-id=16 --pubsub-topic=/waku/2/rs/16/32 --pubsub-topic=/waku/2/rs/16/64 \
//	 --nat=extip:${IP_ADDRESS} --discv5-udp-port=9000 --rest-address=0.0.0.0 --store
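//
// Once the container is running, you can sanity-check that the node is
// reachable before running the tests (this assumes the default nwaku REST
// port 8645, which the command above maps):
//
//	curl http://localhost:8645/debug/v1/info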

func TestBasicWakuV2(t *testing.T) {
	nwakuInfo, err := GetNwakuInfo(nil, nil)
	require.NoError(t, err)

	// Create a fake DNS discovery ENR tree
	tree, url := makeTestTree("n", parseNodes([]string{nwakuInfo.EnrUri}), nil)
	enrTreeAddress := url
	envEnrTreeAddress := os.Getenv("ENRTREE_ADDRESS")
	if envEnrTreeAddress != "" {
		enrTreeAddress = envEnrTreeAddress
	}

	config := &Config{}
	setDefaultConfig(config, false)
	config.Port = 0
	config.Resolver = mapResolver(tree.ToTXT("n"))
	config.DiscV5BootstrapNodes = []string{enrTreeAddress}
	config.DiscoveryLimit = 20
	config.WakuNodes = []string{enrTreeAddress}
	w, err := New(nil, "", config, nil, nil, nil, nil, nil)
	require.NoError(t, err)
	require.NoError(t, w.Start())

	enr, err := w.ENR()
	require.NoError(t, err)
	require.NotNil(t, enr)

	// DNS discovery
	ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second)
	defer cancel()

	discoveredNodes, err := dnsdisc.RetrieveNodes(ctx, enrTreeAddress, dnsdisc.WithResolver(config.Resolver))
	require.NoError(t, err)

	// Pick a random discovered node as the peer used for retrieving history
	r, err := rand.Int(rand.Reader, big.NewInt(int64(len(discoveredNodes))))
	require.NoError(t, err)

	storeNode := discoveredNodes[int(r.Int64())]

	options := func(b *backoff.ExponentialBackOff) {
		b.MaxElapsedTime = 30 * time.Second
	}

	// Sanity check, not great, but it's probably helpful
	err = tt.RetryWithBackOff(func() error {
		if len(w.Peers()) < 1 {
			return errors.New("no peers discovered")
		}
		return nil
	}, options)
	require.NoError(t, err)

	// Drop the store peer
	err = w.DropPeer(storeNode.PeerID)
	require.NoError(t, err)

	// Dial it again by peer ID
	err = w.DialPeerByID(storeNode.PeerID)
	require.NoError(t, err)

	err = tt.RetryWithBackOff(func() error {
		if len(w.Peers()) < 1 {
			return errors.New("no peers discovered")
		}
		return nil
	}, options)
	require.NoError(t, err)

	filter := &common.Filter{
		PubsubTopic:   config.DefaultShardPubsubTopic,
		Messages:      common.NewMemoryMessageStore(),
		ContentTopics: common.NewTopicSetFromBytes([][]byte{{1, 2, 3, 4}}),
	}

	_, err = w.Subscribe(filter)
	require.NoError(t, err)

	msgTimestamp := w.timestamp()
	contentTopic := maps.Keys(filter.ContentTopics)[0]

	time.Sleep(2 * time.Second)

	_, err = w.Send(config.DefaultShardPubsubTopic, &pb.WakuMessage{
		Payload:      []byte{1, 2, 3, 4, 5},
		ContentTopic: contentTopic.ContentTopic(),
		Version:      proto.Uint32(0),
		Timestamp:    &msgTimestamp,
	}, nil)

	require.NoError(t, err)

	time.Sleep(1 * time.Second)

	messages := filter.Retrieve()
	require.Len(t, messages, 1)

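	// msgTimestamp is in nanoseconds; the store query below takes nanosecond
	// bounds, so truncate the timestamp to seconds, re-scale, and widen the
	// window by marginInSeconds on each side.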
	timestampInSeconds := msgTimestamp / int64(time.Second)
	marginInSeconds := 20

	options = func(b *backoff.ExponentialBackOff) {
		b.MaxElapsedTime = 60 * time.Second
		b.InitialInterval = 500 * time.Millisecond
	}
	err = tt.RetryWithBackOff(func() error {
		_, envelopeCount, err := w.Query(
			context.Background(),
			storeNode.PeerID,
			store.FilterCriteria{
				ContentFilter: protocol.NewContentFilter(config.DefaultShardPubsubTopic, contentTopic.ContentTopic()),
				TimeStart:     proto.Int64((timestampInSeconds - int64(marginInSeconds)) * int64(time.Second)),
				TimeEnd:       proto.Int64((timestampInSeconds + int64(marginInSeconds)) * int64(time.Second)),
			},
			nil,
			nil,
			false,
		)
		if err != nil || envelopeCount == 0 {
			// in case of failure, extend the timestamp margin up to 40 seconds
			if marginInSeconds < 40 {
				marginInSeconds += 5
			}
			return errors.New("no messages received from store node")
		}
		return nil
	}, options)
	require.NoError(t, err)

	require.NoError(t, w.Stop())
}

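// mapResolver is an in-memory DNS TXT resolver used to serve the fake ENR
// trees created in these tests; it satisfies the resolver interface expected
// by Config.Resolver and dnsdisc.WithResolver.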
type mapResolver map[string]string

func (mr mapResolver) LookupTXT(ctx context.Context, name string) ([]string, error) {
	if record, ok := mr[name]; ok {
		return []string{record}, nil
	}
	return nil, errors.New("not found")
}

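// signingKeyForTesting is a throwaway private key used only to sign the fake
// DNS discovery trees built by makeTestTree.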
var signingKeyForTesting, _ = crypto.ToECDSA(hexutil.MustDecode("0xdc599867fc513f8f5e2c2c9c489cde5e71362d1d9ec6e693e0de063236ed1240"))

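// makeTestTree builds a DNS discovery tree for the given nodes and links,
// signs it with signingKeyForTesting, and returns the tree together with its
// enrtree:// URL.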
func makeTestTree(domain string, nodes []*enode.Node, links []string) (*ethdnsdisc.Tree, string) {
	tree, err := ethdnsdisc.MakeTree(1, nodes, links)
	if err != nil {
		panic(err)
	}
	url, err := tree.Sign(signingKeyForTesting, domain)
	if err != nil {
		panic(err)
	}
	return tree, url
}

func TestPeerExchange(t *testing.T) {
	logger, err := zap.NewDevelopment()
	require.NoError(t, err)
	// start a node that serves as the PeerExchange server
	config := &Config{}
	config.ClusterID = 16
	config.EnableDiscV5 = true
	config.EnablePeerExchangeServer = true
	config.EnablePeerExchangeClient = false
	pxServerNode, err := New(nil, "", config, logger.Named("pxServerNode"), nil, nil, nil, nil)
	require.NoError(t, err)
	require.NoError(t, pxServerNode.Start())

	time.Sleep(1 * time.Second)

	// start a node that will be discovered by PeerExchange
	config = &Config{}
	config.ClusterID = 16
	config.EnableDiscV5 = true
	config.EnablePeerExchangeServer = false
	config.EnablePeerExchangeClient = false
	config.DiscV5BootstrapNodes = []string{pxServerNode.node.ENR().String()}
	discV5Node, err := New(nil, "", config, logger.Named("discV5Node"), nil, nil, nil, nil)
	require.NoError(t, err)
	require.NoError(t, discV5Node.Start())

	time.Sleep(1 * time.Second)

	// start a light node that uses PeerExchange to discover peers
	enrNodes := []*enode.Node{pxServerNode.node.ENR()}
	tree, url := makeTestTree("n", enrNodes, nil)
	resolver := mapResolver(tree.ToTXT("n"))

	config = &Config{}
	config.ClusterID = 16
	config.EnablePeerExchangeServer = false
	config.EnablePeerExchangeClient = true
	config.LightClient = true
	config.Resolver = resolver

	config.WakuNodes = []string{url}
	lightNode, err := New(nil, "", config, logger.Named("lightNode"), nil, nil, nil, nil)
	require.NoError(t, err)
	require.NoError(t, lightNode.Start())

	// Sanity check, not great, but it's probably helpful
	options := func(b *backoff.ExponentialBackOff) {
		b.MaxElapsedTime = 30 * time.Second
	}
	err = tt.RetryWithBackOff(func() error {
		// We should not use lightNode.Peers() here, as it only reports peers
		// that are connected right now. In light client mode the peer is
		// closed via `w.node.Host().Network().ClosePeer(peerInfo.ID)` after
		// identifyAndConnect runs. Instead we check the peerstore: peers are
		// not deleted from it, especially when they were statically added.
		if len(lightNode.node.Host().Peerstore().Peers()) == 2 {
			return nil
		}
		return errors.New("expected exactly 2 peers in the peerstore")
	}, options)
	require.NoError(t, err)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	require.NoError(t, discV5Node.node.PeerExchange().Request(ctx, 1))
	require.Error(t, discV5Node.node.PeerExchange().Request(ctx, 1)) // should fail due to rate limiting

	require.NoError(t, lightNode.Stop())
	require.NoError(t, pxServerNode.Stop())
	require.NoError(t, discV5Node.Stop())
}

func TestWakuV2Filter(t *testing.T) {
	t.Skip("flaky test")

	enrTreeAddress := testBootENRBootstrap
	envEnrTreeAddress := os.Getenv("ENRTREE_ADDRESS")
	if envEnrTreeAddress != "" {
		enrTreeAddress = envEnrTreeAddress
	}
	config := &Config{}
	setDefaultConfig(config, true)
	config.EnablePeerExchangeClient = false
	config.Port = 0
	config.MinPeersForFilter = 2

	config.DiscV5BootstrapNodes = []string{enrTreeAddress}
	config.DiscoveryLimit = 20
	config.WakuNodes = []string{enrTreeAddress}
	w, err := New(nil, "", config, nil, nil, nil, nil, nil)
	require.NoError(t, err)
	require.NoError(t, w.Start())

	options := func(b *backoff.ExponentialBackOff) {
		b.MaxElapsedTime = 10 * time.Second
	}
	time.Sleep(10 * time.Second) // TODO: Check if we can remove this sleep.

	// Sanity check, not great, but it's probably helpful
	err = tt.RetryWithBackOff(func() error {
		peers, err := w.node.PeerManager().FilterPeersByProto(nil, nil, filter.FilterSubscribeID_v20beta1)
		if err != nil {
			return err
		}
		if len(peers) < 2 {
			return errors.New("not enough filter peers discovered")
		}
		return nil
	}, options)
	require.NoError(t, err)
	testPubsubTopic := "/waku/2/rs/16/32"
	contentTopicBytes := make([]byte, 4)
	_, err = rand.Read(contentTopicBytes)
	require.NoError(t, err)
	filter := &common.Filter{
		Messages:      common.NewMemoryMessageStore(),
		PubsubTopic:   testPubsubTopic,
		ContentTopics: common.NewTopicSetFromBytes([][]byte{contentTopicBytes}),
	}

	fID, err := w.Subscribe(filter)
	require.NoError(t, err)

	msgTimestamp := w.timestamp()
	contentTopic := maps.Keys(filter.ContentTopics)[0]

	_, err = w.Send(testPubsubTopic, &pb.WakuMessage{
		Payload:      []byte{1, 2, 3, 4, 5},
		ContentTopic: contentTopic.ContentTopic(),
		Version:      proto.Uint32(0),
		Timestamp:    &msgTimestamp,
	}, nil)
	require.NoError(t, err)
	time.Sleep(5 * time.Second)

	// Ensure there is at least one active filter subscription
	subscriptions := w.node.FilterLightnode().Subscriptions()
	require.Greater(t, len(subscriptions), 0)

	messages := filter.Retrieve()
	require.Len(t, messages, 1)

	// Mock peers going down by dropping the subscription
	_, err = w.node.FilterLightnode().UnsubscribeWithSubscription(w.ctx, subscriptions[0])
	require.NoError(t, err)

	time.Sleep(10 * time.Second)

	// Ensure the subscription was restored (at least one active again)
	subscriptions = w.node.FilterLightnode().Subscriptions()
	require.Greater(t, len(subscriptions), 0)

	// Ensure that messages are retrieved with a fresh subscription
	_, err = w.Send(testPubsubTopic, &pb.WakuMessage{
		Payload:      []byte{1, 2, 3, 4, 5, 6},
		ContentTopic: contentTopic.ContentTopic(),
		Version:      proto.Uint32(0),
		Timestamp:    &msgTimestamp,
	}, nil)
	require.NoError(t, err)
	time.Sleep(10 * time.Second)

	messages = filter.Retrieve()
	require.Len(t, messages, 1)
	err = w.Unsubscribe(context.Background(), fID)
	require.NoError(t, err)
	require.NoError(t, w.Stop())
}

func TestWakuV2Store(t *testing.T) {
	t.Skip("deprecated. Storenode must use nwaku")

	// Configuration for the first Waku node
	config1 := &Config{
		Port:                             0,
		ClusterID:                        16,
		EnableDiscV5:                     false,
		DiscoveryLimit:                   20,
		EnableStore:                      false,
		StoreCapacity:                    100,
		StoreSeconds:                     3600,
		EnableMissingMessageVerification: true,
	}
	w1PeersCh := make(chan peer.IDSlice, 100) // buffered so as not to block on the send side

	// Start the first Waku node
	w1, err := New(nil, "", config1, nil, nil, nil, nil, func(cs types.ConnStatus) {
		w1PeersCh <- maps.Keys(cs.Peers)
	})
	require.NoError(t, err)
	require.NoError(t, w1.Start())
	defer func() {
		require.NoError(t, w1.Stop())
		close(w1PeersCh)
	}()

	// Configuration for the second Waku node
	sql2, err := helpers.SetupTestMemorySQLDB(appdatabase.DbInitializer{})
	require.NoError(t, err)
	config2 := &Config{
		Port:           0,
		ClusterID:      16,
		EnableDiscV5:   false,
		DiscoveryLimit: 20,
		EnableStore:    true,
		StoreCapacity:  100,
		StoreSeconds:   3600,
	}

	// Start the second Waku node
	w2, err := New(nil, "", config2, nil, sql2, nil, nil, nil)
	require.NoError(t, err)
	require.NoError(t, w2.Start())
	w2EnvelopeCh := make(chan common.EnvelopeEvent, 100)
	w2.SubscribeEnvelopeEvents(w2EnvelopeCh)
	defer func() {
		require.NoError(t, w2.Stop())
		close(w2EnvelopeCh)
	}()

	// Connect the two nodes directly
	peer2Addr := w2.node.ListenAddresses()[0].String()
	err = w1.node.DialPeer(context.Background(), peer2Addr)
	require.NoError(t, err)

	waitForPeerConnection(t, w2.node.Host().ID(), w1PeersCh)

	// Create a filter for the second node to catch messages
	filter := &common.Filter{
		Messages:      common.NewMemoryMessageStore(),
		PubsubTopic:   config2.DefaultShardPubsubTopic,
		ContentTopics: common.NewTopicSetFromBytes([][]byte{{1, 2, 3, 4}}),
	}

	_, err = w2.Subscribe(filter)
	require.NoError(t, err)

	time.Sleep(2 * time.Second)

	// Send a message from the first node
	msgTimestamp := w1.CurrentTime().UnixNano()
	contentTopic := maps.Keys(filter.ContentTopics)[0]
	_, err = w1.Send(config1.DefaultShardPubsubTopic, &pb.WakuMessage{
		Payload:      []byte{1, 2, 3, 4, 5},
		ContentTopic: contentTopic.ContentTopic(),
		Version:      proto.Uint32(0),
		Timestamp:    &msgTimestamp,
	}, nil)
	require.NoError(t, err)

	waitForEnvelope(t, contentTopic.ContentTopic(), w2EnvelopeCh)

	// Retrieve the message from the second node's filter
	messages := filter.Retrieve()
	require.Len(t, messages, 1)

	timestampInSeconds := msgTimestamp / int64(time.Second)
	marginInSeconds := 5
	// Query the second node's store for the message
	_, envelopeCount, err := w1.Query(
		context.Background(),
		w2.node.Host().ID(),
		store.FilterCriteria{
			TimeStart:     proto.Int64((timestampInSeconds - int64(marginInSeconds)) * int64(time.Second)),
			TimeEnd:       proto.Int64((timestampInSeconds + int64(marginInSeconds)) * int64(time.Second)),
			ContentFilter: protocol.NewContentFilter(config1.DefaultShardPubsubTopic, contentTopic.ContentTopic()),
		},
		nil,
		nil,
		false,
	)
	require.NoError(t, err)
	require.True(t, envelopeCount > 0, "no messages received from store node")
}

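// waitForPeerConnection waits up to 3 seconds for peerID to show up on peerCh.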
func waitForPeerConnection(t *testing.T, peerID peer.ID, peerCh chan peer.IDSlice) {
	waitForPeerConnectionWithTimeout(t, peerID, peerCh, 3*time.Second)
}

func waitForPeerConnectionWithTimeout(t *testing.T, peerID peer.ID, peerCh chan peer.IDSlice, timeout time.Duration) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	for {
		select {
		case peers := <-peerCh:
			for _, p := range peers {
				if p == peerID {
					return
				}
			}
		case <-ctx.Done():
			require.Fail(t, "timed out waiting for peer "+peerID.String())
			return
		}
	}
}

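// waitForEnvelope waits up to 2 seconds for an envelope event carrying the
// given content topic, failing the test on timeout.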
func waitForEnvelope(t *testing.T, contentTopic string, envCh chan common.EnvelopeEvent) {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	for {
		select {
		case env := <-envCh:
			if env.Topic.ContentTopic() == contentTopic {
				return
			}
		case <-ctx.Done():
			require.Fail(t, "timed out waiting for envelope with topic "+contentTopic)
			return
		}
	}
}

func TestOnlineChecker(t *testing.T) {
	w, err := New(nil, "shards.staging", nil, nil, nil, nil, nil, nil)
	require.NoError(t, err)
	require.NoError(t, w.Start())

	require.False(t, w.onlineChecker.IsOnline())

	w.ConnectionChanged(connection.State{Offline: false})
	require.True(t, w.onlineChecker.IsOnline())

	wg := sync.WaitGroup{}
	wg.Add(1)
	go func() {
		defer wg.Done()
		// drain the signal emitted when the node goes back online
		<-w.goingOnline
	}()

	time.Sleep(100 * time.Millisecond)

	w.ConnectionChanged(connection.State{Offline: true})
	require.False(t, w.onlineChecker.IsOnline())

	// Test the light node online checker
	config := &Config{}
	config.ClusterID = 16
	config.LightClient = true
	lightNode, err := New(nil, "shards.staging", config, nil, nil, nil, nil, nil)
	require.NoError(t, err)

	err = lightNode.Start()
	require.NoError(t, err)

	require.False(t, lightNode.onlineChecker.IsOnline())
	f := &common.Filter{}
	lightNode.filterManager.SubscribeFilter("test", protocol.NewContentFilter(f.PubsubTopic, f.ContentTopics.ContentTopics()...))
}

func TestLightpushRateLimit(t *testing.T) {
	logger, err := zap.NewDevelopment()
	require.NoError(t, err)

	config0 := &Config{}
	setDefaultConfig(config0, false)
	w0PeersCh := make(chan peer.IDSlice, 5) // buffered so as not to block on the send side

	// Start the relay node
	w0, err := New(nil, "", config0, logger.Named("relayNode"), nil, nil, nil, func(cs types.ConnStatus) {
		w0PeersCh <- maps.Keys(cs.Peers)
	})
	require.NoError(t, err)
	require.NoError(t, w0.Start())
	defer func() {
		require.NoError(t, w0.Stop())
		close(w0PeersCh)
	}()

	contentTopics := common.NewTopicSetFromBytes([][]byte{{1, 2, 3, 4}})
	filter := &common.Filter{
		PubsubTopic:   config0.DefaultShardPubsubTopic,
		Messages:      common.NewMemoryMessageStore(),
		ContentTopics: contentTopics,
	}

	_, err = w0.Subscribe(filter)
	require.NoError(t, err)

	config1 := &Config{}
	setDefaultConfig(config1, false)
	w1PeersCh := make(chan peer.IDSlice, 5) // buffered so as not to block on the send side

	// Start the full node
	w1, err := New(nil, "", config1, logger.Named("fullNode"), nil, nil, nil, func(cs types.ConnStatus) {
		w1PeersCh <- maps.Keys(cs.Peers)
	})
	require.NoError(t, err)
	require.NoError(t, w1.Start())
	defer func() {
		require.NoError(t, w1.Stop())
		close(w1PeersCh)
	}()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	// Connect the relay peer and the full node
	err = w1.node.DialPeer(ctx, w0.node.ListenAddresses()[0].String())
	require.NoError(t, err)

	err = tt.RetryWithBackOff(func() error {
		if len(w1.Peers()) == 0 {
			return errors.New("no peers discovered")
		}
		return nil
	})
	require.NoError(t, err)

	config2 := &Config{}
	setDefaultConfig(config2, true)
	w2PeersCh := make(chan peer.IDSlice, 5) // buffered so as not to block on the send side

	// Start the light node
	w2, err := New(nil, "", config2, logger.Named("lightNode"), nil, nil, nil, func(cs types.ConnStatus) {
		w2PeersCh <- maps.Keys(cs.Peers)
	})
	require.NoError(t, err)
	require.NoError(t, w2.Start())
	defer func() {
		require.NoError(t, w2.Stop())
		close(w2PeersCh)
	}()

	// Use this instead of DialPeer to make sure the peer is added to the
	// peerstore and can be selected for lightpush
	w2.node.AddDiscoveredPeer(w1.PeerID(), w1.node.ListenAddresses(), wps.Static, w1.cfg.DefaultShardedPubsubTopics, w1.node.ENR(), true)

	waitForPeerConnectionWithTimeout(t, w2.node.Host().ID(), w1PeersCh, 5*time.Second)

	event := make(chan common.EnvelopeEvent, 10)
	w2.SubscribeEnvelopeEvents(event)

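	// Send four messages roughly 550ms apart. The full node rate-limits
	// lightpush requests, so some of them are expected to be rejected; the
	// assertion below expects exactly 2 of the 4 messages to reach the relay
	// node's filter.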
	for i := range [4]int{} {
		msgTimestamp := w2.timestamp()
		_, err := w2.Send(config2.DefaultShardPubsubTopic, &pb.WakuMessage{
			Payload:      []byte{1, 2, 3, 4, 5, 6, byte(i)},
			ContentTopic: maps.Keys(contentTopics)[0].ContentTopic(),
			Version:      proto.Uint32(0),
			Timestamp:    &msgTimestamp,
		}, nil)

		require.NoError(t, err)

		time.Sleep(550 * time.Millisecond)
	}

	messages := filter.Retrieve()
	require.Len(t, messages, 2)
}

func TestTelemetryFormat(t *testing.T) {
	logger, err := zap.NewDevelopment()
	require.NoError(t, err)

	tc := NewBandwidthTelemetryClient(logger, "#")

	s := metrics.Stats{
		TotalIn:  10,
		TotalOut: 20,
		RateIn:   30,
		RateOut:  40,
	}

	m := make(map[libp2pprotocol.ID]metrics.Stats)
	m[relay.WakuRelayID_v200] = s
	m[filter.FilterPushID_v20beta1] = s
	m[filter.FilterSubscribeID_v20beta1] = s
	m[legacy_store.StoreID_v20beta4] = s
	m[lightpush.LightPushID_v20beta1] = s

	requestBody := tc.getTelemetryRequestBody(m)
	_, err = json.Marshal(requestBody)
	require.NoError(t, err)
}