github.com/status-im/status-go@v1.1.0/wakuv2/waku.go

     1  // Copyright 2019 The Waku Library Authors.
     2  //
     3  // The Waku library is free software: you can redistribute it and/or modify
     4  // it under the terms of the GNU Lesser General Public License as published by
     5  // the Free Software Foundation, either version 3 of the License, or
     6  // (at your option) any later version.
     7  //
     8  // The Waku library is distributed in the hope that it will be useful,
      9  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    10  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    11  // GNU Lesser General Public License for more details.
    12  //
    13  // You should have received a copy of the GNU Lesser General Public License
    14  // along with the Waku library. If not, see <http://www.gnu.org/licenses/>.
    15  //
    16  // This software uses the go-ethereum library, which is licensed
     17  // under the GNU Lesser General Public License, version 3 or any later version.
    18  
    19  package wakuv2
    20  
    21  import (
    22  	"context"
    23  	"crypto/ecdsa"
    24  	"crypto/sha256"
    25  	"database/sql"
    26  	"errors"
    27  	"fmt"
    28  	"math"
    29  	"net"
    30  	"runtime"
    31  	"strings"
    32  	"sync"
    33  	"testing"
    34  	"time"
    35  
    36  	"github.com/jellydator/ttlcache/v3"
    37  	"github.com/libp2p/go-libp2p/core/peer"
    38  	"github.com/libp2p/go-libp2p/core/peerstore"
    39  	"github.com/multiformats/go-multiaddr"
    40  
    41  	"go.uber.org/zap"
    42  
    43  	"golang.org/x/crypto/pbkdf2"
    44  	"golang.org/x/time/rate"
    45  
    46  	gethcommon "github.com/ethereum/go-ethereum/common"
    47  	"github.com/ethereum/go-ethereum/common/hexutil"
    48  	"github.com/ethereum/go-ethereum/crypto"
    49  	"github.com/ethereum/go-ethereum/event"
    50  	"github.com/ethereum/go-ethereum/p2p"
    51  	"github.com/ethereum/go-ethereum/p2p/enode"
    52  	"github.com/ethereum/go-ethereum/rpc"
    53  
    54  	"github.com/libp2p/go-libp2p"
    55  	pubsub "github.com/libp2p/go-libp2p-pubsub"
    56  	"github.com/libp2p/go-libp2p/core/metrics"
    57  	"github.com/libp2p/go-libp2p/p2p/protocol/ping"
    58  
    59  	filterapi "github.com/waku-org/go-waku/waku/v2/api/filter"
    60  	"github.com/waku-org/go-waku/waku/v2/api/missing"
    61  	"github.com/waku-org/go-waku/waku/v2/api/publish"
    62  	"github.com/waku-org/go-waku/waku/v2/dnsdisc"
    63  	"github.com/waku-org/go-waku/waku/v2/onlinechecker"
    64  	"github.com/waku-org/go-waku/waku/v2/peermanager"
    65  	wps "github.com/waku-org/go-waku/waku/v2/peerstore"
    66  	"github.com/waku-org/go-waku/waku/v2/protocol"
    67  	"github.com/waku-org/go-waku/waku/v2/protocol/filter"
    68  	"github.com/waku-org/go-waku/waku/v2/protocol/legacy_store"
    69  	"github.com/waku-org/go-waku/waku/v2/protocol/lightpush"
    70  	"github.com/waku-org/go-waku/waku/v2/protocol/peer_exchange"
    71  	"github.com/waku-org/go-waku/waku/v2/protocol/relay"
    72  	"github.com/waku-org/go-waku/waku/v2/protocol/store"
    73  	"github.com/waku-org/go-waku/waku/v2/utils"
    74  
    75  	"github.com/status-im/status-go/connection"
    76  	"github.com/status-im/status-go/eth-node/types"
    77  	"github.com/status-im/status-go/logutils"
    78  	"github.com/status-im/status-go/timesource"
    79  	"github.com/status-im/status-go/wakuv2/common"
    80  	"github.com/status-im/status-go/wakuv2/persistence"
    81  
    82  	node "github.com/waku-org/go-waku/waku/v2/node"
    83  	"github.com/waku-org/go-waku/waku/v2/protocol/pb"
    84  )
    85  
    86  const messageQueueLimit = 1024
    87  const requestTimeout = 30 * time.Second
    88  const bootnodesQueryBackoffMs = 200
    89  const bootnodesMaxRetries = 7
    90  const cacheTTL = 20 * time.Minute
    91  const maxRelayPeers = 300
    92  const randomPeersKeepAliveInterval = 5 * time.Second
    93  const allPeersKeepAliveInterval = 5 * time.Minute
    94  
    95  type SentEnvelope struct {
    96  	Envelope      *protocol.Envelope
    97  	PublishMethod publish.PublishMethod
    98  }
    99  
   100  type ErrorSendingEnvelope struct {
   101  	Error        error
   102  	SentEnvelope SentEnvelope
   103  }
   104  
   105  type ITelemetryClient interface {
   106  	SetDeviceType(deviceType string)
   107  	PushReceivedEnvelope(ctx context.Context, receivedEnvelope *protocol.Envelope)
   108  	PushSentEnvelope(ctx context.Context, sentEnvelope SentEnvelope)
   109  	PushErrorSendingEnvelope(ctx context.Context, errorSendingEnvelope ErrorSendingEnvelope)
   110  	PushPeerCount(ctx context.Context, peerCount int)
   111  	PushPeerConnFailures(ctx context.Context, peerConnFailures map[string]int)
   112  	PushMessageCheckSuccess(ctx context.Context, messageHash string)
   113  	PushMessageCheckFailure(ctx context.Context, messageHash string)
   114  	PushPeerCountByShard(ctx context.Context, peerCountByShard map[uint16]uint)
   115  	PushPeerCountByOrigin(ctx context.Context, peerCountByOrigin map[wps.Origin]uint)
   116  }
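
         // A no-op implementation can satisfy ITelemetryClient when telemetry is
         // disabled, e.g. in tests. Sketch (the type name is illustrative, not part
         // of this package):
         //
         //	type noopTelemetryClient struct{}
         //
         //	func (noopTelemetryClient) SetDeviceType(string)                                           {}
         //	func (noopTelemetryClient) PushReceivedEnvelope(context.Context, *protocol.Envelope)       {}
         //	func (noopTelemetryClient) PushSentEnvelope(context.Context, SentEnvelope)                 {}
         //	func (noopTelemetryClient) PushErrorSendingEnvelope(context.Context, ErrorSendingEnvelope) {}
         //	func (noopTelemetryClient) PushPeerCount(context.Context, int)                             {}
         //	func (noopTelemetryClient) PushPeerConnFailures(context.Context, map[string]int)           {}
         //	func (noopTelemetryClient) PushMessageCheckSuccess(context.Context, string)                {}
         //	func (noopTelemetryClient) PushMessageCheckFailure(context.Context, string)                {}
         //	func (noopTelemetryClient) PushPeerCountByShard(context.Context, map[uint16]uint)          {}
         //	func (noopTelemetryClient) PushPeerCountByOrigin(context.Context, map[wps.Origin]uint)     {}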
   117  
   118  // Waku represents a dark communication interface through the Ethereum
   119  // network, using its very own P2P communication layer.
   120  type Waku struct {
   121  	node  *node.WakuNode // reference to a libp2p waku node
   122  	appDB *sql.DB
   123  
   124  	dnsAddressCache     map[string][]dnsdisc.DiscoveredNode // Map to store the multiaddresses returned by dns discovery
   125  	dnsAddressCacheLock *sync.RWMutex                       // lock to handle access to the map
   126  
   127  	// Filter-related
   128  	filters       *common.Filters // Message filters installed with Subscribe function
   129  	filterManager *filterapi.FilterManager
   130  
   131  	privateKeys map[string]*ecdsa.PrivateKey // Private key storage
   132  	symKeys     map[string][]byte            // Symmetric key storage
   133  	keyMu       sync.RWMutex                 // Mutex associated with key stores
   134  
   135  	envelopeCache *ttlcache.Cache[gethcommon.Hash, *common.ReceivedMessage] // Pool of envelopes currently tracked by this node
   136  	poolMu        sync.RWMutex                                              // Mutex to sync the message and expiration pools
   137  
   138  	bandwidthCounter *metrics.BandwidthCounter
   139  
   140  	protectedTopicStore *persistence.ProtectedTopicsStore
   141  
   142  	sendQueue *publish.MessageQueue
   143  
   144  	missingMsgVerifier *missing.MissingMessageVerifier
   145  
    146  	msgQueue chan *common.ReceivedMessage // Message queue for waku messages that haven't been decoded
   147  
   148  	ctx    context.Context
   149  	cancel context.CancelFunc
   150  	wg     sync.WaitGroup
   151  
   152  	cfg     *Config
   153  	options []node.WakuNodeOption
   154  
   155  	envelopeFeed event.Feed
   156  
   157  	storeMsgIDs   map[gethcommon.Hash]bool // Map of the currently processing ids
   158  	storeMsgIDsMu sync.RWMutex
   159  
   160  	messageSender *publish.MessageSender
   161  
   162  	topicHealthStatusChan   chan peermanager.TopicHealthStatus
   163  	connectionNotifChan     chan node.PeerConnection
   164  	connStatusSubscriptions map[string]*types.ConnStatusSubscription
   165  	connStatusMu            sync.Mutex
   166  	onlineChecker           *onlinechecker.DefaultOnlineChecker
   167  	state                   connection.State
   168  
   169  	logger *zap.Logger
   170  
   171  	// NTP Synced timesource
   172  	timesource *timesource.NTPTimeSource
   173  
    174  	// seededBootnodesForDiscV5 indicates whether we managed to retrieve discovery
   175  	// bootnodes successfully
   176  	seededBootnodesForDiscV5 bool
   177  
    178  	// goingOnline is a channel that notifies when connectivity has changed from offline to online
   179  	goingOnline chan struct{}
   180  
    181  	// discV5BootstrapNodes is the list of ENRs/enrtree addresses used to fetch bootstrap nodes for discovery
   182  	discV5BootstrapNodes []string
   183  
   184  	onHistoricMessagesRequestFailed func([]byte, peer.ID, error)
   185  	onPeerStats                     func(types.ConnStatus)
   186  
   187  	statusTelemetryClient ITelemetryClient
   188  
   189  	defaultShardInfo protocol.RelayShards
   190  }
   191  
   192  func (w *Waku) SetStatusTelemetryClient(client ITelemetryClient) {
   193  	w.statusTelemetryClient = client
   194  }
   195  
   196  func newTTLCache() *ttlcache.Cache[gethcommon.Hash, *common.ReceivedMessage] {
   197  	cache := ttlcache.New[gethcommon.Hash, *common.ReceivedMessage](ttlcache.WithTTL[gethcommon.Hash, *common.ReceivedMessage](cacheTTL))
   198  	go cache.Start()
   199  	return cache
   200  }
   201  
   202  // New creates a WakuV2 client ready to communicate through the LibP2P network.
   203  func New(nodeKey *ecdsa.PrivateKey, fleet string, cfg *Config, logger *zap.Logger, appDB *sql.DB, ts *timesource.NTPTimeSource, onHistoricMessagesRequestFailed func([]byte, peer.ID, error), onPeerStats func(types.ConnStatus)) (*Waku, error) {
   204  	var err error
   205  	if logger == nil {
   206  		logger, err = zap.NewDevelopment()
   207  		if err != nil {
   208  			return nil, err
   209  		}
   210  	}
   211  
   212  	if ts == nil {
   213  		ts = timesource.Default()
   214  	}
   215  
   216  	cfg = setDefaults(cfg)
   217  	if err = cfg.Validate(logger); err != nil {
   218  		return nil, err
   219  	}
   220  
   221  	logger.Info("starting wakuv2 with config", zap.Any("config", cfg))
   222  
   223  	ctx, cancel := context.WithCancel(context.Background())
   224  
   225  	waku := &Waku{
   226  		appDB:                           appDB,
   227  		cfg:                             cfg,
   228  		privateKeys:                     make(map[string]*ecdsa.PrivateKey),
   229  		symKeys:                         make(map[string][]byte),
   230  		envelopeCache:                   newTTLCache(),
   231  		msgQueue:                        make(chan *common.ReceivedMessage, messageQueueLimit),
   232  		topicHealthStatusChan:           make(chan peermanager.TopicHealthStatus, 100),
   233  		connectionNotifChan:             make(chan node.PeerConnection, 20),
   234  		connStatusSubscriptions:         make(map[string]*types.ConnStatusSubscription),
   235  		ctx:                             ctx,
   236  		cancel:                          cancel,
   237  		wg:                              sync.WaitGroup{},
   238  		dnsAddressCache:                 make(map[string][]dnsdisc.DiscoveredNode),
   239  		dnsAddressCacheLock:             &sync.RWMutex{},
   240  		storeMsgIDs:                     make(map[gethcommon.Hash]bool),
   241  		timesource:                      ts,
   242  		storeMsgIDsMu:                   sync.RWMutex{},
   243  		logger:                          logger,
   244  		discV5BootstrapNodes:            cfg.DiscV5BootstrapNodes,
   245  		onHistoricMessagesRequestFailed: onHistoricMessagesRequestFailed,
   246  		onPeerStats:                     onPeerStats,
   247  		onlineChecker:                   onlinechecker.NewDefaultOnlineChecker(false).(*onlinechecker.DefaultOnlineChecker),
   248  		sendQueue:                       publish.NewMessageQueue(1000, cfg.UseThrottledPublish),
   249  	}
   250  
   251  	waku.filters = common.NewFilters(waku.cfg.DefaultShardPubsubTopic, waku.logger)
   252  	waku.bandwidthCounter = metrics.NewBandwidthCounter()
   253  
   254  	if nodeKey == nil {
   255  		// No nodekey is provided, create an ephemeral key
   256  		nodeKey, err = crypto.GenerateKey()
   257  		if err != nil {
   258  			return nil, fmt.Errorf("failed to generate a random go-waku private key: %v", err)
   259  		}
   260  	}
   261  
   262  	hostAddr, err := net.ResolveTCPAddr("tcp", fmt.Sprint(cfg.Host, ":", cfg.Port))
   263  	if err != nil {
   264  		return nil, fmt.Errorf("failed to setup the network interface: %v", err)
   265  	}
   266  
   267  	libp2pOpts := node.DefaultLibP2POptions
   268  	libp2pOpts = append(libp2pOpts, libp2p.BandwidthReporter(waku.bandwidthCounter))
   269  	libp2pOpts = append(libp2pOpts, libp2p.NATPortMap())
   270  
   271  	opts := []node.WakuNodeOption{
   272  		node.WithLibP2POptions(libp2pOpts...),
   273  		node.WithPrivateKey(nodeKey),
   274  		node.WithHostAddress(hostAddr),
   275  		node.WithConnectionNotification(waku.connectionNotifChan),
   276  		node.WithTopicHealthStatusChannel(waku.topicHealthStatusChan),
   277  		node.WithKeepAlive(randomPeersKeepAliveInterval, allPeersKeepAliveInterval),
   278  		node.WithLogger(logger),
   279  		node.WithLogLevel(logger.Level()),
   280  		node.WithClusterID(cfg.ClusterID),
   281  		node.WithMaxMsgSize(1024 * 1024),
   282  	}
   283  
   284  	if cfg.EnableDiscV5 {
   285  		bootnodes, err := waku.getDiscV5BootstrapNodes(waku.ctx, cfg.DiscV5BootstrapNodes)
   286  		if err != nil {
   287  			logger.Error("failed to get bootstrap nodes", zap.Error(err))
   288  			return nil, err
   289  		}
   290  		opts = append(opts, node.WithDiscoveryV5(uint(cfg.UDPPort), bootnodes, cfg.AutoUpdate))
   291  	}
   292  	shards, err := protocol.TopicsToRelayShards(cfg.DefaultShardPubsubTopic)
   293  	if err != nil {
   294  		logger.Error("FATAL ERROR: failed to parse relay shards", zap.Error(err))
   295  		return nil, errors.New("failed to parse relay shard, invalid pubsubTopic configuration")
   296  	}
    297  	if len(shards) == 0 { // Hack so that tests don't fail. TODO: remove this once tests are changed to use a proper cluster and shard.
   298  		shardInfo := protocol.RelayShards{ClusterID: 0, ShardIDs: []uint16{0}}
   299  		shards = append(shards, shardInfo)
   300  	}
   301  	waku.defaultShardInfo = shards[0]
   302  	if cfg.LightClient {
   303  		opts = append(opts, node.WithWakuFilterLightNode())
   304  		waku.defaultShardInfo = shards[0]
   305  		opts = append(opts, node.WithMaxPeerConnections(cfg.DiscoveryLimit))
   306  		cfg.EnableStoreConfirmationForMessagesSent = false
    307  		// TODO: temporary workaround to improve light-client connectivity; to be removed once community sharding is implemented
   308  		opts = append(opts, node.WithPubSubTopics(cfg.DefaultShardedPubsubTopics))
   309  	} else {
   310  		relayOpts := []pubsub.Option{
   311  			pubsub.WithMaxMessageSize(int(waku.cfg.MaxMessageSize)),
   312  		}
   313  
   314  		if testing.Testing() {
   315  			relayOpts = append(relayOpts, pubsub.WithEventTracer(waku))
   316  		}
   317  
   318  		opts = append(opts, node.WithWakuRelayAndMinPeers(waku.cfg.MinPeersForRelay, relayOpts...))
   319  		opts = append(opts, node.WithMaxPeerConnections(maxRelayPeers))
    320  		cfg.EnablePeerExchangeClient = true // Enabling this until discv5 issues are resolved; it allows more peers to be connected for the relay mesh.
   321  		cfg.EnableStoreConfirmationForMessagesSent = true
   322  	}
   323  
   324  	if cfg.EnableStore {
   325  		if appDB == nil {
   326  			return nil, errors.New("appDB is required for store")
   327  		}
   328  		opts = append(opts, node.WithWakuStore())
   329  		dbStore, err := persistence.NewDBStore(logger, persistence.WithDB(appDB), persistence.WithRetentionPolicy(cfg.StoreCapacity, time.Duration(cfg.StoreSeconds)*time.Second))
   330  		if err != nil {
   331  			return nil, err
   332  		}
   333  		opts = append(opts, node.WithMessageProvider(dbStore))
   334  	}
   335  
   336  	if !cfg.LightClient {
   337  		opts = append(opts, node.WithWakuFilterFullNode(filter.WithMaxSubscribers(20)))
   338  		opts = append(opts, node.WithLightPush(lightpush.WithRateLimiter(1, 1)))
   339  	}
   340  
   341  	if appDB != nil {
   342  		waku.protectedTopicStore, err = persistence.NewProtectedTopicsStore(logger, appDB)
   343  		if err != nil {
   344  			return nil, err
   345  		}
   346  	}
   347  
   348  	if cfg.EnablePeerExchangeServer {
   349  		opts = append(opts, node.WithPeerExchange(peer_exchange.WithRateLimiter(1, 1)))
   350  	}
   351  
   352  	waku.options = opts
   353  	waku.logger.Info("setup the go-waku node successfully")
   354  
   355  	return waku, nil
   356  }
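
         // Example (sketch): constructing and starting a node. The exact Config
         // fields required depend on the deployment; here an empty Config is assumed
         // to be valid after setDefaults:
         //
         //	waku, err := New(nil, "", &Config{}, nil, nil, nil, nil, nil)
         //	if err != nil {
         //		panic(err)
         //	}
         //	if err := waku.Start(); err != nil {
         //		panic(err)
         //	}
         //	defer func() { _ = waku.Stop() }()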
   357  
   358  func (w *Waku) SubscribeToConnStatusChanges() *types.ConnStatusSubscription {
   359  	w.connStatusMu.Lock()
   360  	defer w.connStatusMu.Unlock()
   361  	subscription := types.NewConnStatusSubscription()
   362  	w.connStatusSubscriptions[subscription.ID] = subscription
   363  	return subscription
   364  }
   365  
   366  func (w *Waku) GetNodeENRString() (string, error) {
   367  	if w.node == nil {
   368  		return "", errors.New("node not initialized")
   369  	}
   370  	return w.node.ENR().String(), nil
   371  }
   372  
   373  func (w *Waku) getDiscV5BootstrapNodes(ctx context.Context, addresses []string) ([]*enode.Node, error) {
   374  	wg := sync.WaitGroup{}
   375  	mu := sync.Mutex{}
   376  	var result []*enode.Node
   377  
   378  	w.seededBootnodesForDiscV5 = true
   379  
   380  	retrieveENR := func(d dnsdisc.DiscoveredNode, wg *sync.WaitGroup) {
   381  		mu.Lock()
   382  		defer mu.Unlock()
   383  		defer wg.Done()
   384  		if d.ENR != nil {
   385  			result = append(result, d.ENR)
   386  		}
   387  	}
   388  
   389  	for _, addrString := range addresses {
   390  		if addrString == "" {
   391  			continue
   392  		}
   393  
   394  		if strings.HasPrefix(addrString, "enrtree://") {
   395  			// Use DNS Discovery
   396  			wg.Add(1)
   397  			go func(addr string) {
   398  				defer wg.Done()
   399  				if err := w.dnsDiscover(ctx, addr, retrieveENR); err != nil {
   400  					mu.Lock()
   401  					w.seededBootnodesForDiscV5 = false
   402  					mu.Unlock()
   403  				}
   404  			}(addrString)
   405  		} else {
   406  			// It's a normal enr
   407  			bootnode, err := enode.Parse(enode.ValidSchemes, addrString)
   408  			if err != nil {
   409  				return nil, err
   410  			}
   411  			result = append(result, bootnode)
   412  		}
   413  	}
   414  	wg.Wait()
   415  
   416  	return result, nil
   417  }
   418  
   419  type fnApplyToEachPeer func(d dnsdisc.DiscoveredNode, wg *sync.WaitGroup)
   420  
   421  func (w *Waku) dnsDiscover(ctx context.Context, enrtreeAddress string, apply fnApplyToEachPeer) error {
   422  	w.logger.Info("retrieving nodes", zap.String("enr", enrtreeAddress))
   423  	ctx, cancel := context.WithTimeout(ctx, requestTimeout)
   424  	defer cancel()
   425  
   426  	w.dnsAddressCacheLock.Lock()
   427  	defer w.dnsAddressCacheLock.Unlock()
   428  
   429  	discNodes, ok := w.dnsAddressCache[enrtreeAddress]
   430  	if !ok {
   431  		nameserver := w.cfg.Nameserver
   432  		resolver := w.cfg.Resolver
   433  
   434  		var opts []dnsdisc.DNSDiscoveryOption
   435  		if nameserver != "" {
   436  			opts = append(opts, dnsdisc.WithNameserver(nameserver))
   437  		}
   438  		if resolver != nil {
   439  			opts = append(opts, dnsdisc.WithResolver(resolver))
   440  		}
   441  
   442  		discoveredNodes, err := dnsdisc.RetrieveNodes(ctx, enrtreeAddress, opts...)
   443  		if err != nil {
   444  			w.logger.Warn("dns discovery error ", zap.Error(err))
   445  			return err
   446  		}
   447  
   448  		if len(discoveredNodes) != 0 {
   449  			w.dnsAddressCache[enrtreeAddress] = append(w.dnsAddressCache[enrtreeAddress], discoveredNodes...)
   450  			discNodes = w.dnsAddressCache[enrtreeAddress]
   451  		}
   452  	}
   453  
   454  	wg := &sync.WaitGroup{}
   455  	wg.Add(len(discNodes))
   456  	for _, d := range discNodes {
   457  		apply(d, wg)
   458  	}
   459  	wg.Wait()
   460  
   461  	return nil
   462  }
   463  
   464  func (w *Waku) discoverAndConnectPeers() {
   465  	fnApply := func(d dnsdisc.DiscoveredNode, wg *sync.WaitGroup) {
   466  		defer wg.Done()
   467  		if len(d.PeerInfo.Addrs) != 0 {
   468  			go w.connect(d.PeerInfo, d.ENR, wps.DNSDiscovery)
   469  		}
   470  	}
   471  
   472  	for _, addrString := range w.cfg.WakuNodes {
   473  		addrString := addrString
   474  		if strings.HasPrefix(addrString, "enrtree://") {
   475  			// Use DNS Discovery
   476  			go func() {
   477  				if err := w.dnsDiscover(w.ctx, addrString, fnApply); err != nil {
   478  					w.logger.Error("could not obtain dns discovery peers for ClusterConfig.WakuNodes", zap.Error(err), zap.String("dnsDiscURL", addrString))
   479  				}
   480  			}()
   481  		} else {
   482  			// It is a normal multiaddress
   483  			addr, err := multiaddr.NewMultiaddr(addrString)
   484  			if err != nil {
   485  				w.logger.Warn("invalid peer multiaddress", zap.String("ma", addrString), zap.Error(err))
   486  				continue
   487  			}
   488  
   489  			peerInfo, err := peer.AddrInfoFromP2pAddr(addr)
   490  			if err != nil {
   491  				w.logger.Warn("invalid peer multiaddress", zap.Stringer("addr", addr), zap.Error(err))
   492  				continue
   493  			}
   494  
   495  			go w.connect(*peerInfo, nil, wps.Static)
   496  		}
   497  	}
   498  }
   499  
   500  func (w *Waku) connect(peerInfo peer.AddrInfo, enr *enode.Node, origin wps.Origin) {
    501  	// The connection will be pruned eventually by the connection manager if needed.
    502  	// The peer connector in go-waku uses Connect, so it will execute identify as part of the connection flow.
   503  	w.node.AddDiscoveredPeer(peerInfo.ID, peerInfo.Addrs, origin, w.cfg.DefaultShardedPubsubTopics, enr, true)
   504  }
   505  
   506  func (w *Waku) telemetryBandwidthStats(telemetryServerURL string) {
   507  	defer w.wg.Done()
   508  
   509  	if telemetryServerURL == "" {
   510  		return
   511  	}
   512  
   513  	telemetry := NewBandwidthTelemetryClient(w.logger, telemetryServerURL)
   514  
   515  	ticker := time.NewTicker(time.Second * 20)
   516  	defer ticker.Stop()
   517  
   518  	today := time.Now()
   519  
   520  	for {
   521  		select {
   522  		case <-w.ctx.Done():
   523  			return
   524  		case now := <-ticker.C:
   525  			// Reset totals when day changes
   526  			if now.Day() != today.Day() {
   527  				today = now
   528  				w.bandwidthCounter.Reset()
   529  			}
   530  
   531  			go telemetry.PushProtocolStats(w.bandwidthCounter.GetBandwidthByProtocol())
   532  		}
   533  	}
   534  }
   535  
   536  func (w *Waku) GetStats() types.StatsSummary {
   537  	stats := w.bandwidthCounter.GetBandwidthTotals()
   538  	return types.StatsSummary{
   539  		UploadRate:   uint64(stats.RateOut),
   540  		DownloadRate: uint64(stats.RateIn),
   541  	}
   542  }
   543  
   544  func (w *Waku) runPeerExchangeLoop() {
   545  	defer w.wg.Done()
   546  
   547  	if !w.cfg.EnablePeerExchangeClient {
    548  		// Currently the peer exchange client is only used for light nodes
   549  		return
   550  	}
   551  
   552  	ticker := time.NewTicker(time.Second * 5)
   553  	defer ticker.Stop()
   554  
   555  	for {
   556  		select {
   557  		case <-w.ctx.Done():
   558  			w.logger.Debug("Peer exchange loop stopped")
   559  			return
   560  		case <-ticker.C:
   561  			w.logger.Info("Running peer exchange loop")
   562  
    563  			// We select only the nodes discovered via DNS Discovery that support peer exchange.
    564  			// We assume that those peers are running peer exchange according to the infra config;
    565  			// if not, the peer selection process in go-waku will filter them out anyway.
   566  			w.dnsAddressCacheLock.RLock()
   567  			var peers peer.IDSlice
   568  			for _, record := range w.dnsAddressCache {
   569  				for _, discoveredNode := range record {
   570  					if len(discoveredNode.PeerInfo.Addrs) == 0 {
   571  						continue
   572  					}
   573  					// Attempt to connect to the peers.
   574  					// Peers will be added to the libp2p peer store thanks to identify
   575  					go w.connect(discoveredNode.PeerInfo, discoveredNode.ENR, wps.DNSDiscovery)
   576  					peers = append(peers, discoveredNode.PeerID)
   577  				}
   578  			}
   579  			w.dnsAddressCacheLock.RUnlock()
   580  
   581  			if len(peers) != 0 {
   582  				err := w.node.PeerExchange().Request(w.ctx, w.cfg.DiscoveryLimit, peer_exchange.WithAutomaticPeerSelection(peers...),
   583  					peer_exchange.FilterByShard(int(w.defaultShardInfo.ClusterID), int(w.defaultShardInfo.ShardIDs[0])))
   584  				if err != nil {
    585  					w.logger.Error("couldn't request peers via peer exchange", zap.Error(err))
   586  				}
   587  			}
   588  		}
   589  	}
   590  }
   591  
   592  func (w *Waku) GetPubsubTopic(topic string) string {
   593  	if topic == "" {
   594  		topic = w.cfg.DefaultShardPubsubTopic
   595  	}
   596  
   597  	return topic
   598  }
   599  
   600  func (w *Waku) unsubscribeFromPubsubTopicWithWakuRelay(topic string) error {
   601  	topic = w.GetPubsubTopic(topic)
   602  
   603  	if !w.node.Relay().IsSubscribed(topic) {
   604  		return nil
   605  	}
   606  
   607  	contentFilter := protocol.NewContentFilter(topic)
   608  
   609  	return w.node.Relay().Unsubscribe(w.ctx, contentFilter)
   610  }
   611  
   612  func (w *Waku) subscribeToPubsubTopicWithWakuRelay(topic string, pubkey *ecdsa.PublicKey) error {
   613  	if w.cfg.LightClient {
   614  		return errors.New("only available for full nodes")
   615  	}
   616  
   617  	topic = w.GetPubsubTopic(topic)
   618  
   619  	if w.node.Relay().IsSubscribed(topic) {
   620  		return nil
   621  	}
   622  
   623  	if pubkey != nil {
   624  		err := w.node.Relay().AddSignedTopicValidator(topic, pubkey)
   625  		if err != nil {
   626  			return err
   627  		}
   628  	}
   629  
   630  	contentFilter := protocol.NewContentFilter(topic)
   631  
   632  	sub, err := w.node.Relay().Subscribe(w.ctx, contentFilter)
   633  	if err != nil {
   634  		return err
   635  	}
   636  
   637  	w.wg.Add(1)
   638  	go func() {
   639  		defer w.wg.Done()
   640  		for {
   641  			select {
   642  			case <-w.ctx.Done():
   643  				err := w.node.Relay().Unsubscribe(w.ctx, contentFilter)
   644  				if err != nil && !errors.Is(err, context.Canceled) {
   645  					w.logger.Error("could not unsubscribe", zap.Error(err))
   646  				}
   647  				return
   648  			case env := <-sub[0].Ch:
   649  				err := w.OnNewEnvelopes(env, common.RelayedMessageType, false)
   650  				if err != nil {
   651  					w.logger.Error("OnNewEnvelopes error", zap.Error(err))
   652  				}
   653  			}
   654  		}
   655  	}()
   656  
   657  	return nil
   658  }
   659  
   660  // MaxMessageSize returns the maximum accepted message size.
   661  func (w *Waku) MaxMessageSize() uint32 {
   662  	return w.cfg.MaxMessageSize
   663  }
   664  
   665  // CurrentTime returns current time.
   666  func (w *Waku) CurrentTime() time.Time {
   667  	return w.timesource.Now()
   668  }
   669  
   670  // APIs returns the RPC descriptors the Waku implementation offers
   671  func (w *Waku) APIs() []rpc.API {
   672  	return []rpc.API{
   673  		{
   674  			Namespace: Name,
   675  			Version:   VersionStr,
   676  			Service:   NewPublicWakuAPI(w),
   677  			Public:    false,
   678  		},
   679  	}
   680  }
   681  
   682  // Protocols returns the waku sub-protocols ran by this particular client.
   683  func (w *Waku) Protocols() []p2p.Protocol {
   684  	return []p2p.Protocol{}
   685  }
   686  
   687  func (w *Waku) SendEnvelopeEvent(event common.EnvelopeEvent) int {
   688  	return w.envelopeFeed.Send(event)
   689  }
   690  
    691  // SubscribeEnvelopeEvents subscribes to the envelope feed.
    692  // In order to prevent blocking waku producers, events must be amply buffered.
   693  func (w *Waku) SubscribeEnvelopeEvents(events chan<- common.EnvelopeEvent) event.Subscription {
   694  	return w.envelopeFeed.Subscribe(events)
   695  }
   696  
   697  // NewKeyPair generates a new cryptographic identity for the client, and injects
    698  // it into the known identities for message decryption. It returns the ID of the new key pair.
   699  func (w *Waku) NewKeyPair() (string, error) {
   700  	key, err := crypto.GenerateKey()
   701  	if err != nil || !validatePrivateKey(key) {
   702  		key, err = crypto.GenerateKey() // retry once
   703  	}
   704  	if err != nil {
   705  		return "", err
   706  	}
   707  	if !validatePrivateKey(key) {
   708  		return "", fmt.Errorf("failed to generate valid key")
   709  	}
   710  
   711  	id, err := toDeterministicID(hexutil.Encode(crypto.FromECDSAPub(&key.PublicKey)), common.KeyIDSize)
   712  	if err != nil {
   713  		return "", err
   714  	}
   715  
   716  	w.keyMu.Lock()
   717  	defer w.keyMu.Unlock()
   718  
   719  	if w.privateKeys[id] != nil {
   720  		return "", fmt.Errorf("failed to generate unique ID")
   721  	}
   722  	w.privateKeys[id] = key
   723  	return id, nil
   724  }
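
         // Example (sketch): generate an identity, retrieve its private key, and
         // remove it again:
         //
         //	id, err := waku.NewKeyPair()
         //	if err != nil {
         //		panic(err)
         //	}
         //	privKey, err := waku.GetPrivateKey(id)
         //	if err != nil {
         //		panic(err)
         //	}
         //	_ = privKey // use for message decryption
         //	waku.DeleteKeyPair(id)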
   725  
   726  // DeleteKeyPair deletes the specified key if it exists.
   727  func (w *Waku) DeleteKeyPair(key string) bool {
   728  	deterministicID, err := toDeterministicID(key, common.KeyIDSize)
   729  	if err != nil {
   730  		return false
   731  	}
   732  
   733  	w.keyMu.Lock()
   734  	defer w.keyMu.Unlock()
   735  
   736  	if w.privateKeys[deterministicID] != nil {
   737  		delete(w.privateKeys, deterministicID)
   738  		return true
   739  	}
   740  	return false
   741  }
   742  
    743  // AddKeyPair imports an asymmetric private key and returns its identifier.
   744  func (w *Waku) AddKeyPair(key *ecdsa.PrivateKey) (string, error) {
   745  	id, err := makeDeterministicID(hexutil.Encode(crypto.FromECDSAPub(&key.PublicKey)), common.KeyIDSize)
   746  	if err != nil {
   747  		return "", err
   748  	}
   749  	if w.HasKeyPair(id) {
   750  		return id, nil // no need to re-inject
   751  	}
   752  
   753  	w.keyMu.Lock()
   754  	w.privateKeys[id] = key
   755  	w.keyMu.Unlock()
   756  
   757  	return id, nil
   758  }
   759  
   760  // SelectKeyPair adds cryptographic identity, and makes sure
   761  // that it is the only private key known to the node.
   762  func (w *Waku) SelectKeyPair(key *ecdsa.PrivateKey) error {
   763  	id, err := makeDeterministicID(hexutil.Encode(crypto.FromECDSAPub(&key.PublicKey)), common.KeyIDSize)
   764  	if err != nil {
   765  		return err
   766  	}
   767  
   768  	w.keyMu.Lock()
   769  	defer w.keyMu.Unlock()
   770  
   771  	w.privateKeys = make(map[string]*ecdsa.PrivateKey) // reset key store
   772  	w.privateKeys[id] = key
   773  
   774  	return nil
   775  }
   776  
   777  // DeleteKeyPairs removes all cryptographic identities known to the node
   778  func (w *Waku) DeleteKeyPairs() error {
   779  	w.keyMu.Lock()
   780  	defer w.keyMu.Unlock()
   781  
   782  	w.privateKeys = make(map[string]*ecdsa.PrivateKey)
   783  
   784  	return nil
   785  }
   786  
   787  // HasKeyPair checks if the waku node is configured with the private key
    788  // of the specified key pair.
   789  func (w *Waku) HasKeyPair(id string) bool {
   790  	deterministicID, err := toDeterministicID(id, common.KeyIDSize)
   791  	if err != nil {
   792  		return false
   793  	}
   794  
   795  	w.keyMu.RLock()
   796  	defer w.keyMu.RUnlock()
   797  	return w.privateKeys[deterministicID] != nil
   798  }
   799  
   800  // GetPrivateKey retrieves the private key of the specified identity.
   801  func (w *Waku) GetPrivateKey(id string) (*ecdsa.PrivateKey, error) {
   802  	deterministicID, err := toDeterministicID(id, common.KeyIDSize)
   803  	if err != nil {
   804  		return nil, err
   805  	}
   806  
   807  	w.keyMu.RLock()
   808  	defer w.keyMu.RUnlock()
   809  	key := w.privateKeys[deterministicID]
   810  	if key == nil {
   811  		return nil, fmt.Errorf("invalid id")
   812  	}
   813  	return key, nil
   814  }
   815  
   816  // GenerateSymKey generates a random symmetric key and stores it under id,
   817  // which is then returned. Will be used in the future for session key exchange.
   818  func (w *Waku) GenerateSymKey() (string, error) {
   819  	key, err := common.GenerateSecureRandomData(common.AESKeyLength)
   820  	if err != nil {
   821  		return "", err
   822  	} else if !common.ValidateDataIntegrity(key, common.AESKeyLength) {
   823  		return "", fmt.Errorf("error in GenerateSymKey: crypto/rand failed to generate random data")
   824  	}
   825  
   826  	id, err := common.GenerateRandomID()
   827  	if err != nil {
   828  		return "", fmt.Errorf("failed to generate ID: %s", err)
   829  	}
   830  
   831  	w.keyMu.Lock()
   832  	defer w.keyMu.Unlock()
   833  
   834  	if w.symKeys[id] != nil {
   835  		return "", fmt.Errorf("failed to generate unique ID")
   836  	}
   837  	w.symKeys[id] = key
   838  	return id, nil
   839  }
   840  
   841  // AddSymKey stores the key with a given id.
   842  func (w *Waku) AddSymKey(id string, key []byte) (string, error) {
   843  	deterministicID, err := toDeterministicID(id, common.KeyIDSize)
   844  	if err != nil {
   845  		return "", err
   846  	}
   847  
   848  	w.keyMu.Lock()
   849  	defer w.keyMu.Unlock()
   850  
   851  	if w.symKeys[deterministicID] != nil {
   852  		return "", fmt.Errorf("key already exists: %v", id)
   853  	}
   854  	w.symKeys[deterministicID] = key
   855  	return deterministicID, nil
   856  }
   857  
   858  // AddSymKeyDirect stores the key, and returns its id.
   859  func (w *Waku) AddSymKeyDirect(key []byte) (string, error) {
   860  	if len(key) != common.AESKeyLength {
   861  		return "", fmt.Errorf("wrong key size: %d", len(key))
   862  	}
   863  
   864  	id, err := common.GenerateRandomID()
   865  	if err != nil {
   866  		return "", fmt.Errorf("failed to generate ID: %s", err)
   867  	}
   868  
   869  	w.keyMu.Lock()
   870  	defer w.keyMu.Unlock()
   871  
   872  	if w.symKeys[id] != nil {
   873  		return "", fmt.Errorf("failed to generate unique ID")
   874  	}
   875  	w.symKeys[id] = key
   876  	return id, nil
   877  }
   878  
   879  // AddSymKeyFromPassword generates the key from password, stores it, and returns its id.
   880  func (w *Waku) AddSymKeyFromPassword(password string) (string, error) {
   881  	id, err := common.GenerateRandomID()
   882  	if err != nil {
   883  		return "", fmt.Errorf("failed to generate ID: %s", err)
   884  	}
   885  	if w.HasSymKey(id) {
   886  		return "", fmt.Errorf("failed to generate unique ID")
   887  	}
   888  
    889  	// The KDF should run for no less than 0.1 seconds on an average computer,
    890  	// because it's a once-in-a-session operation
   891  	derived := pbkdf2.Key([]byte(password), nil, 65356, common.AESKeyLength, sha256.New)
   892  
   893  	w.keyMu.Lock()
   894  	defer w.keyMu.Unlock()
   895  
    896  	// the double check is necessary because the key derivation above is very slow
   897  	if w.symKeys[id] != nil {
   898  		return "", fmt.Errorf("critical error: failed to generate unique ID")
   899  	}
   900  	w.symKeys[id] = derived
   901  	return id, nil
   902  }
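
         // Example (sketch): derive a symmetric key from a password and read it back:
         //
         //	id, err := waku.AddSymKeyFromPassword("correct horse battery staple")
         //	if err != nil {
         //		panic(err)
         //	}
         //	key, err := waku.GetSymKey(id)
         //	if err != nil {
         //		panic(err)
         //	}
         //	_ = key // common.AESKeyLength bytes derived via PBKDF2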
   903  
   904  // HasSymKey returns true if there is a key associated with the given id.
   905  // Otherwise returns false.
   906  func (w *Waku) HasSymKey(id string) bool {
   907  	w.keyMu.RLock()
   908  	defer w.keyMu.RUnlock()
   909  	return w.symKeys[id] != nil
   910  }
   911  
   912  // DeleteSymKey deletes the key associated with the name string if it exists.
   913  func (w *Waku) DeleteSymKey(id string) bool {
   914  	w.keyMu.Lock()
   915  	defer w.keyMu.Unlock()
   916  	if w.symKeys[id] != nil {
   917  		delete(w.symKeys, id)
   918  		return true
   919  	}
   920  	return false
   921  }
   922  
   923  // GetSymKey returns the symmetric key associated with the given id.
   924  func (w *Waku) GetSymKey(id string) ([]byte, error) {
   925  	w.keyMu.RLock()
   926  	defer w.keyMu.RUnlock()
   927  	if w.symKeys[id] != nil {
   928  		return w.symKeys[id], nil
   929  	}
   930  	return nil, fmt.Errorf("non-existent key ID")
   931  }
   932  
   933  // Subscribe installs a new message handler used for filtering, decrypting
   934  // and subsequent storing of incoming messages.
   935  func (w *Waku) Subscribe(f *common.Filter) (string, error) {
   936  	f.PubsubTopic = w.GetPubsubTopic(f.PubsubTopic)
   937  	id, err := w.filters.Install(f)
   938  	if err != nil {
   939  		return id, err
   940  	}
   941  
   942  	if w.cfg.LightClient {
   943  		cf := protocol.NewContentFilter(f.PubsubTopic, f.ContentTopics.ContentTopics()...)
   944  		w.filterManager.SubscribeFilter(id, cf)
   945  	}
   946  
   947  	return id, nil
   948  }
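
         // Example (sketch): installing a filter and later removing it. How the
         // common.Filter fields are populated (in particular ContentTopics and any
         // key material) is elided here, since it depends on the application's
         // topic scheme:
         //
         //	f := &common.Filter{ /* PubsubTopic, ContentTopics, key material, ... */ }
         //	id, err := waku.Subscribe(f)
         //	if err != nil {
         //		panic(err)
         //	}
         //	defer func() { _ = waku.Unsubscribe(context.TODO(), id) }()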
   949  
   950  // Unsubscribe removes an installed message handler.
   951  func (w *Waku) Unsubscribe(ctx context.Context, id string) error {
   952  	ok := w.filters.Uninstall(id)
   953  	if !ok {
   954  		return fmt.Errorf("failed to unsubscribe: invalid ID '%s'", id)
   955  	}
   956  
   957  	if w.cfg.LightClient {
   958  		w.filterManager.UnsubscribeFilter(id)
   959  	}
   960  
   961  	return nil
   962  }
   963  
   964  // GetFilter returns the filter by id.
   965  func (w *Waku) GetFilter(id string) *common.Filter {
   966  	return w.filters.Get(id)
   967  }
   968  
    969  // UnsubscribeMany removes a batch of installed message handlers by ID.
   970  func (w *Waku) UnsubscribeMany(ids []string) error {
   971  	for _, id := range ids {
   972  		w.logger.Info("cleaning up filter", zap.String("id", id))
   973  		ok := w.filters.Uninstall(id)
   974  		if !ok {
   975  			w.logger.Warn("could not remove filter with id", zap.String("id", id))
   976  		}
   977  	}
   978  	return nil
   979  }
   980  
   981  func (w *Waku) SkipPublishToTopic(value bool) {
   982  	w.cfg.SkipPublishToTopic = value
   983  }
   984  
   985  func (w *Waku) ConfirmMessageDelivered(hashes []gethcommon.Hash) {
   986  	w.messageSender.MessagesDelivered(hashes)
   987  }
   988  
   989  func (w *Waku) SetStorePeerID(peerID peer.ID) {
   990  	w.messageSender.SetStorePeerID(peerID)
   991  }
   992  
   993  func (w *Waku) Query(ctx context.Context, peerID peer.ID, query store.FilterCriteria, cursor []byte, opts []store.RequestOption, processEnvelopes bool) ([]byte, int, error) {
   994  	requestID := protocol.GenerateRequestID()
   995  
   996  	opts = append(opts,
   997  		store.WithRequestID(requestID),
   998  		store.WithPeer(peerID),
   999  		store.WithCursor(cursor))
  1000  
  1001  	logger := w.logger.With(zap.String("requestID", hexutil.Encode(requestID)), zap.Stringer("peerID", peerID))
  1002  
  1003  	logger.Debug("store.query",
  1004  		logutils.WakuMessageTimestamp("startTime", query.TimeStart),
  1005  		logutils.WakuMessageTimestamp("endTime", query.TimeEnd),
  1006  		zap.Strings("contentTopics", query.ContentTopics.ToList()),
  1007  		zap.String("pubsubTopic", query.PubsubTopic),
  1008  		zap.String("cursor", hexutil.Encode(cursor)),
  1009  	)
  1010  
  1011  	queryStart := time.Now()
  1012  	result, err := w.node.Store().Query(ctx, query, opts...)
  1013  	queryDuration := time.Since(queryStart)
  1014  	if err != nil {
  1015  		logger.Error("error querying storenode", zap.Error(err))
  1016  
  1017  		if w.onHistoricMessagesRequestFailed != nil {
  1018  			w.onHistoricMessagesRequestFailed(requestID, peerID, err)
  1019  		}
  1020  		return nil, 0, err
  1021  	}
  1022  
  1023  	messages := result.Messages()
  1024  	envelopesCount := len(messages)
  1025  	w.logger.Debug("store.query response", zap.Duration("queryDuration", queryDuration), zap.Int("numMessages", envelopesCount), zap.Bool("hasCursor", result.IsComplete() && result.Cursor() != nil))
  1026  	for _, mkv := range messages {
  1027  		msg := mkv.Message
  1028  
   1029  		// Temporarily setting RateLimitProof to nil so it matches the WakuMessage protobuf we are sending
  1030  		// See https://github.com/vacp2p/rfc/issues/563
  1031  		mkv.Message.RateLimitProof = nil
  1032  
  1033  		envelope := protocol.NewEnvelope(msg, msg.GetTimestamp(), query.PubsubTopic)
  1034  
  1035  		err = w.OnNewEnvelopes(envelope, common.StoreMessageType, processEnvelopes)
  1036  		if err != nil {
  1037  			return nil, 0, err
  1038  		}
  1039  	}
  1040  
  1041  	return result.Cursor(), envelopesCount, nil
  1042  }
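
         // Example (sketch): paginating a store query until the cursor is exhausted.
         // The FilterCriteria literal assumes go-waku's embedded ContentFilter plus
         // *int64 time-range fields; peerID, pubsubTopic and contentTopic are
         // placeholders:
         //
         //	startNs := start.UnixNano()
         //	endNs := end.UnixNano()
         //	criteria := store.FilterCriteria{
         //		ContentFilter: protocol.NewContentFilter(pubsubTopic, contentTopic),
         //		TimeStart:     &startNs,
         //		TimeEnd:       &endNs,
         //	}
         //	var cursor []byte
         //	total := 0
         //	for {
         //		var n int
         //		var err error
         //		cursor, n, err = waku.Query(ctx, peerID, criteria, cursor, nil, false)
         //		if err != nil {
         //			panic(err)
         //		}
         //		total += n
         //		if cursor == nil {
         //			break
         //		}
         //	}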
  1043  
   1044  // OnNewEnvelope implements the Waku FilterManager API and is invoked whenever a new message is received by the filter.
  1045  func (w *Waku) OnNewEnvelope(env *protocol.Envelope) error {
  1046  	return w.OnNewEnvelopes(env, common.RelayedMessageType, false)
  1047  }
  1048  
  1049  // Start implements node.Service, starting the background data propagation thread
  1050  // of the Waku protocol.
  1051  func (w *Waku) Start() error {
  1052  	if w.ctx == nil {
  1053  		w.ctx, w.cancel = context.WithCancel(context.Background())
  1054  	}
  1055  
  1056  	var err error
  1057  	if w.node, err = node.New(w.options...); err != nil {
  1058  		return fmt.Errorf("failed to create a go-waku node: %v", err)
  1059  	}
  1060  
  1061  	w.goingOnline = make(chan struct{})
  1062  
  1063  	if err = w.node.Start(w.ctx); err != nil {
  1064  		return fmt.Errorf("failed to start go-waku node: %v", err)
  1065  	}
  1066  
  1067  	w.logger.Info("WakuV2 PeerID", zap.Stringer("id", w.node.Host().ID()))
  1068  
  1069  	w.discoverAndConnectPeers()
  1070  
  1071  	if w.cfg.EnableDiscV5 {
  1072  		err := w.node.DiscV5().Start(w.ctx)
  1073  		if err != nil {
  1074  			return err
  1075  		}
  1076  	}
  1077  
  1078  	w.wg.Add(1)
  1079  	go func() {
  1080  		defer w.wg.Done()
  1081  		ticker := time.NewTicker(5 * time.Second)
  1082  		defer ticker.Stop()
  1083  		for {
  1084  			select {
  1085  			case <-w.ctx.Done():
  1086  				return
  1087  			case <-ticker.C:
  1088  				w.checkForConnectionChanges()
  1089  			case <-w.topicHealthStatusChan:
  1090  				// TODO: https://github.com/status-im/status-go/issues/4628
  1091  			case <-w.connectionNotifChan:
  1092  				w.checkForConnectionChanges()
  1093  			}
  1094  		}
  1095  	}()
  1096  
  1097  	if w.cfg.TelemetryServerURL != "" {
  1098  		w.wg.Add(1)
  1099  		go func() {
  1100  			defer w.wg.Done()
  1101  			peerTelemetryTickerInterval := time.Duration(w.cfg.TelemetryPeerCountSendPeriod) * time.Millisecond
  1102  			if peerTelemetryTickerInterval == 0 {
  1103  				peerTelemetryTickerInterval = 10 * time.Second
  1104  			}
  1105  			peerTelemetryTicker := time.NewTicker(peerTelemetryTickerInterval)
  1106  			defer peerTelemetryTicker.Stop()
  1107  
  1108  			for {
  1109  				select {
  1110  				case <-w.ctx.Done():
  1111  					return
  1112  				case <-peerTelemetryTicker.C:
  1113  					w.reportPeerMetrics()
  1114  				}
  1115  			}
  1116  		}()
  1117  	}
  1118  
  1119  	w.wg.Add(1)
  1120  	go w.telemetryBandwidthStats(w.cfg.TelemetryServerURL)
   1121  	// TODO: commenting for now so that only fleet nodes are used.
   1122  	// Need to uncomment once filter peer scoring etc. is implemented.
  1123  
  1124  	w.wg.Add(1)
  1125  	go w.runPeerExchangeLoop()
  1126  
  1127  	if w.cfg.EnableMissingMessageVerification {
  1128  
  1129  		w.missingMsgVerifier = missing.NewMissingMessageVerifier(
  1130  			w.node.Store(),
  1131  			w,
  1132  			w.node.Timesource(),
  1133  			w.logger)
  1134  
  1135  		w.missingMsgVerifier.Start(w.ctx)
  1136  
  1137  		w.wg.Add(1)
  1138  		go func() {
   1139  			defer w.wg.Done()
  1140  			for {
  1141  				select {
  1142  				case <-w.ctx.Done():
  1143  					return
  1144  				case envelope := <-w.missingMsgVerifier.C:
  1145  					err = w.OnNewEnvelopes(envelope, common.MissingMessageType, false)
  1146  					if err != nil {
  1147  						w.logger.Error("OnNewEnvelopes error", zap.Error(err))
  1148  					}
  1149  				}
  1150  			}
  1151  		}()
  1152  	}
  1153  
  1154  	if w.cfg.LightClient {
   1155  		// Create FilterManager that will maintain peer connectivity
  1156  		// for installed filters
  1157  		w.filterManager = filterapi.NewFilterManager(
  1158  			w.ctx,
  1159  			w.logger,
  1160  			w.cfg.MinPeersForFilter,
  1161  			w,
  1162  			w.node.FilterLightnode(),
  1163  			filterapi.WithBatchInterval(300*time.Millisecond))
  1164  	}
  1165  
  1166  	err = w.setupRelaySubscriptions()
  1167  	if err != nil {
  1168  		return err
  1169  	}
  1170  
  1171  	numCPU := runtime.NumCPU()
  1172  	for i := 0; i < numCPU; i++ {
  1173  		w.wg.Add(1)
  1174  		go w.processQueueLoop()
  1175  	}
  1176  
  1177  	w.wg.Add(1)
  1178  	go w.broadcast()
  1179  
  1180  	go w.sendQueue.Start(w.ctx)
  1181  
  1182  	err = w.startMessageSender()
  1183  	if err != nil {
  1184  		return err
  1185  	}
  1186  
   1187  	// we should wait for `seedBootnodesForDiscV5` to shut down smoothly before setting w.ctx to nil within `w.Stop()`
  1188  	w.wg.Add(1)
  1189  	go w.seedBootnodesForDiscV5()
  1190  
  1191  	return nil
  1192  }
  1193  
  1194  func (w *Waku) checkForConnectionChanges() {
  1195  
  1196  	isOnline := len(w.node.Host().Network().Peers()) > 0
  1197  
  1198  	w.connStatusMu.Lock()
  1199  
  1200  	latestConnStatus := types.ConnStatus{
  1201  		IsOnline: isOnline,
  1202  		Peers:    FormatPeerStats(w.node),
  1203  	}
  1204  
  1205  	w.logger.Debug("peer stats",
  1206  		zap.Int("peersCount", len(latestConnStatus.Peers)),
  1207  		zap.Any("stats", latestConnStatus))
  1208  	for k, subs := range w.connStatusSubscriptions {
  1209  		if !subs.Send(latestConnStatus) {
  1210  			delete(w.connStatusSubscriptions, k)
  1211  		}
  1212  	}
  1213  
  1214  	w.connStatusMu.Unlock()
  1215  
  1216  	if w.onPeerStats != nil {
  1217  		w.onPeerStats(latestConnStatus)
  1218  	}
  1219  
  1220  	w.ConnectionChanged(connection.State{
   1221  		Type:    w.state.Type, // keeping the previous state type since there won't be a change here
  1222  		Offline: !latestConnStatus.IsOnline,
  1223  	})
  1224  }
  1225  
  1226  func (w *Waku) reportPeerMetrics() {
  1227  	if w.statusTelemetryClient != nil {
  1228  		connFailures := FormatPeerConnFailures(w.node)
  1229  		w.statusTelemetryClient.PushPeerCount(w.ctx, w.PeerCount())
  1230  		w.statusTelemetryClient.PushPeerConnFailures(w.ctx, connFailures)
  1231  
  1232  		peerCountByOrigin := make(map[wps.Origin]uint)
  1233  		peerCountByShard := make(map[uint16]uint)
  1234  		wakuPeerStore := w.node.Host().Peerstore().(wps.WakuPeerstore)
  1235  
  1236  		for _, peerID := range w.node.Host().Network().Peers() {
  1237  			origin, err := wakuPeerStore.Origin(peerID)
  1238  			if err != nil {
  1239  				origin = wps.Unknown
  1240  			}
  1241  
  1242  			peerCountByOrigin[origin]++
  1243  			pubsubTopics, err := wakuPeerStore.PubSubTopics(peerID)
  1244  			if err != nil {
  1245  				continue
  1246  			}
  1247  
  1248  			keys := make([]string, 0, len(pubsubTopics))
  1249  			for k := range pubsubTopics {
  1250  				keys = append(keys, k)
  1251  			}
  1252  			relayShards, err := protocol.TopicsToRelayShards(keys...)
  1253  			if err != nil {
  1254  				continue
  1255  			}
  1256  
  1257  			for _, shards := range relayShards {
  1258  				for _, shard := range shards.ShardIDs {
  1259  					peerCountByShard[shard]++
  1260  				}
  1261  			}
  1262  		}
  1263  		w.statusTelemetryClient.PushPeerCountByShard(w.ctx, peerCountByShard)
  1264  		w.statusTelemetryClient.PushPeerCountByOrigin(w.ctx, peerCountByOrigin)
  1265  	}
  1266  }
  1267  
  1268  func (w *Waku) startMessageSender() error {
  1269  	publishMethod := publish.Relay
  1270  	if w.cfg.LightClient {
  1271  		publishMethod = publish.LightPush
  1272  	}
  1273  
  1274  	sender, err := publish.NewMessageSender(publishMethod, w.node.Lightpush(), w.node.Relay(), w.logger)
  1275  	if err != nil {
  1276  		w.logger.Error("failed to create message sender", zap.Error(err))
  1277  		return err
  1278  	}
  1279  
  1280  	if w.cfg.EnableStoreConfirmationForMessagesSent {
  1281  		msgStoredChan := make(chan gethcommon.Hash, 1000)
  1282  		msgExpiredChan := make(chan gethcommon.Hash, 1000)
  1283  		messageSentCheck := publish.NewMessageSentCheck(w.ctx, w.node.Store(), w.node.Timesource(), msgStoredChan, msgExpiredChan, w.logger)
  1284  		sender.WithMessageSentCheck(messageSentCheck)
  1285  
  1286  		w.wg.Add(1)
  1287  		go func() {
  1288  			defer w.wg.Done()
  1289  			for {
  1290  				select {
  1291  				case <-w.ctx.Done():
  1292  					return
  1293  				case hash := <-msgStoredChan:
  1294  					w.SendEnvelopeEvent(common.EnvelopeEvent{
  1295  						Hash:  hash,
  1296  						Event: common.EventEnvelopeSent,
  1297  					})
  1298  					if w.statusTelemetryClient != nil {
  1299  						w.statusTelemetryClient.PushMessageCheckSuccess(w.ctx, hash.Hex())
  1300  					}
  1301  				case hash := <-msgExpiredChan:
  1302  					w.SendEnvelopeEvent(common.EnvelopeEvent{
  1303  						Hash:  hash,
  1304  						Event: common.EventEnvelopeExpired,
  1305  					})
  1306  					if w.statusTelemetryClient != nil {
  1307  						w.statusTelemetryClient.PushMessageCheckFailure(w.ctx, hash.Hex())
  1308  					}
  1309  				}
  1310  			}
  1311  		}()
  1312  	}
  1313  
  1314  	if !w.cfg.UseThrottledPublish || testing.Testing() {
   1315  		// To avoid delaying the tests, or for when we don't want to rate limit, we set up an infinite rate limiter,
  1316  		// basically disabling the rate limit functionality
  1317  		limiter := publish.NewPublishRateLimiter(rate.Inf, 1)
  1318  		sender.WithRateLimiting(limiter)
  1319  	}
  1320  
  1321  	w.messageSender = sender
  1322  	w.messageSender.Start()
  1323  
  1324  	return nil
  1325  }
  1326  
  1327  func (w *Waku) MessageExists(mh pb.MessageHash) (bool, error) {
  1328  	w.poolMu.Lock()
  1329  	defer w.poolMu.Unlock()
  1330  	return w.envelopeCache.Has(gethcommon.Hash(mh)), nil
  1331  }
  1332  
  1333  func (w *Waku) SetTopicsToVerifyForMissingMessages(peerID peer.ID, pubsubTopic string, contentTopics []string) {
  1334  	if !w.cfg.EnableMissingMessageVerification {
  1335  		return
  1336  	}
  1337  
  1338  	w.missingMsgVerifier.SetCriteriaInterest(peerID, protocol.NewContentFilter(pubsubTopic, contentTopics...))
  1339  }
  1340  
  1341  func (w *Waku) setupRelaySubscriptions() error {
  1342  	if w.cfg.LightClient {
  1343  		return nil
  1344  	}
  1345  
  1346  	if w.protectedTopicStore != nil {
  1347  		protectedTopics, err := w.protectedTopicStore.ProtectedTopics()
  1348  		if err != nil {
  1349  			return err
  1350  		}
  1351  
  1352  		for _, pt := range protectedTopics {
   1353  			// Add a subscription for each protected topic
  1354  			err = w.subscribeToPubsubTopicWithWakuRelay(pt.Topic, pt.PubKey)
  1355  			if err != nil {
  1356  				return err
  1357  			}
  1358  		}
  1359  	}
  1360  
  1361  	err := w.subscribeToPubsubTopicWithWakuRelay(w.cfg.DefaultShardPubsubTopic, nil)
  1362  	if err != nil {
  1363  		return err
  1364  	}
  1365  
  1366  	return nil
  1367  }
  1368  
  1369  // Stop implements node.Service, stopping the background data propagation thread
  1370  // of the Waku protocol.
  1371  func (w *Waku) Stop() error {
  1372  	w.cancel()
  1373  
  1374  	w.envelopeCache.Stop()
  1375  
  1376  	w.node.Stop()
  1377  
  1378  	if w.protectedTopicStore != nil {
  1379  		err := w.protectedTopicStore.Close()
  1380  		if err != nil {
  1381  			return err
  1382  		}
  1383  	}
  1384  
  1385  	close(w.goingOnline)
  1386  	w.wg.Wait()
  1387  
  1388  	w.ctx = nil
  1389  	w.cancel = nil
  1390  
  1391  	return nil
  1392  }
  1393  
  1394  func (w *Waku) OnNewEnvelopes(envelope *protocol.Envelope, msgType common.MessageType, processImmediately bool) error {
  1395  	if envelope == nil {
  1396  		return nil
  1397  	}
  1398  
  1399  	recvMessage := common.NewReceivedMessage(envelope, msgType)
  1400  	if recvMessage == nil {
  1401  		return nil
  1402  	}
  1403  
  1404  	if w.statusTelemetryClient != nil {
  1405  		w.statusTelemetryClient.PushReceivedEnvelope(w.ctx, envelope)
  1406  	}
  1407  
  1408  	logger := w.logger.With(
  1409  		zap.String("messageType", msgType),
  1410  		zap.Stringer("envelopeHash", envelope.Hash()),
  1411  		zap.String("pubsubTopic", envelope.PubsubTopic()),
  1412  		zap.String("contentTopic", envelope.Message().ContentTopic),
  1413  		logutils.WakuMessageTimestamp("timestamp", envelope.Message().Timestamp),
  1414  	)
  1415  
  1416  	logger.Debug("received new envelope")
  1417  	trouble := false
  1418  
  1419  	_, err := w.add(recvMessage, processImmediately)
  1420  	if err != nil {
  1421  		logger.Info("invalid envelope received", zap.Error(err))
  1422  		trouble = true
  1423  	}
  1424  
  1425  	common.EnvelopesValidatedCounter.Inc()
  1426  
  1427  	if trouble {
  1428  		return errors.New("received invalid envelope")
  1429  	}
  1430  
  1431  	return nil
  1432  }
  1433  
   1434  // addEnvelope adds an envelope to the envelope cache of tracked messages
  1435  func (w *Waku) addEnvelope(envelope *common.ReceivedMessage) {
  1436  	w.poolMu.Lock()
  1437  	w.envelopeCache.Set(envelope.Hash(), envelope, ttlcache.DefaultTTL)
  1438  	w.poolMu.Unlock()
  1439  }
  1440  
  1441  func (w *Waku) add(recvMessage *common.ReceivedMessage, processImmediately bool) (bool, error) {
  1442  	common.EnvelopesReceivedCounter.Inc()
  1443  
  1444  	w.poolMu.Lock()
  1445  	envelope := w.envelopeCache.Get(recvMessage.Hash())
  1446  	alreadyCached := envelope != nil
  1447  	w.poolMu.Unlock()
  1448  
  1449  	if !alreadyCached {
  1450  		recvMessage.Processed.Store(false)
  1451  		w.addEnvelope(recvMessage)
  1452  	}
  1453  
  1454  	logger := w.logger.With(zap.String("envelopeHash", recvMessage.Hash().Hex()))
  1455  
  1456  	if alreadyCached {
   1457  		logger.Debug("waku envelope already cached")
  1458  		common.EnvelopesCachedCounter.WithLabelValues("hit").Inc()
  1459  	} else {
   1460  		logger.Debug("cached waku envelope")
  1461  		common.EnvelopesCachedCounter.WithLabelValues("miss").Inc()
  1462  		common.EnvelopesSizeMeter.Observe(float64(len(recvMessage.Envelope.Message().Payload)))
  1463  	}
  1464  
  1465  	if !alreadyCached || !envelope.Value().Processed.Load() {
  1466  		if processImmediately {
  1467  			logger.Debug("immediately processing envelope")
  1468  			w.processMessage(recvMessage)
  1469  		} else {
  1470  			logger.Debug("posting event")
  1471  			w.postEvent(recvMessage) // notify the local node about the new message
  1472  		}
  1473  	}
  1474  
  1475  	return true, nil
  1476  }
  1477  
  1478  // postEvent queues the message for further processing.
  1479  func (w *Waku) postEvent(envelope *common.ReceivedMessage) {
  1480  	w.msgQueue <- envelope
  1481  }
  1482  
  1483  // processQueueLoop delivers the messages to the watchers during the lifetime of the waku node.
  1484  func (w *Waku) processQueueLoop() {
  1485  	defer w.wg.Done()
  1486  	if w.ctx == nil {
  1487  		return
  1488  	}
  1489  	for {
  1490  		select {
  1491  		case <-w.ctx.Done():
  1492  			return
  1493  		case e := <-w.msgQueue:
  1494  			w.processMessage(e)
  1495  		}
  1496  	}
  1497  }
  1498  
  1499  func (w *Waku) processMessage(e *common.ReceivedMessage) {
  1500  	logger := w.logger.With(
  1501  		zap.Stringer("envelopeHash", e.Envelope.Hash()),
  1502  		zap.String("pubsubTopic", e.PubsubTopic),
  1503  		zap.String("contentTopic", e.ContentTopic.ContentTopic()),
  1504  		zap.Int64("timestamp", e.Envelope.Message().GetTimestamp()),
  1505  	)
  1506  
  1507  	if e.MsgType == common.StoreMessageType {
  1508  		// We need to insert it first, and then remove it if not matched,
  1509  		// as messages are processed asynchronously
  1510  		w.storeMsgIDsMu.Lock()
  1511  		w.storeMsgIDs[e.Hash()] = true
  1512  		w.storeMsgIDsMu.Unlock()
  1513  	}
  1514  
  1515  	matched := w.filters.NotifyWatchers(e)
  1516  
  1517  	// If not matched we remove it
  1518  	if !matched {
  1519  		logger.Debug("filters did not match")
  1520  		w.storeMsgIDsMu.Lock()
  1521  		delete(w.storeMsgIDs, e.Hash())
  1522  		w.storeMsgIDsMu.Unlock()
  1523  	} else {
  1524  		logger.Debug("filters did match")
  1525  		e.Processed.Store(true)
  1526  	}
  1527  
  1528  	w.envelopeFeed.Send(common.EnvelopeEvent{
  1529  		Topic: e.ContentTopic,
  1530  		Hash:  e.Hash(),
  1531  		Event: common.EventEnvelopeAvailable,
  1532  	})
  1533  }
  1534  
  1535  // GetEnvelope retrieves an envelope from the message queue by its hash.
   1536  // It returns nil if the envelope cannot be found.
  1537  func (w *Waku) GetEnvelope(hash gethcommon.Hash) *common.ReceivedMessage {
  1538  	w.poolMu.RLock()
  1539  	defer w.poolMu.RUnlock()
  1540  
  1541  	envelope := w.envelopeCache.Get(hash)
  1542  	if envelope == nil {
  1543  		return nil
  1544  	}
  1545  
  1546  	return envelope.Value()
  1547  }
  1548  
   1549  // IsEnvelopeCached checks whether an envelope with the given hash has already been received and cached.
  1550  func (w *Waku) IsEnvelopeCached(hash gethcommon.Hash) bool {
  1551  	w.poolMu.Lock()
  1552  	defer w.poolMu.Unlock()
  1553  
  1554  	return w.envelopeCache.Has(hash)
  1555  }
  1556  
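        // ClearEnvelopesCache stops the current envelope cache and replaces it with a fresh, empty one.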
  1557  func (w *Waku) ClearEnvelopesCache() {
  1558  	w.poolMu.Lock()
  1559  	defer w.poolMu.Unlock()
  1560  
  1561  	w.envelopeCache.Stop()
  1562  	w.envelopeCache = newTTLCache()
  1563  }
  1564  
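        // PeerCount returns the number of peers connected to the waku node.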
  1565  func (w *Waku) PeerCount() int {
  1566  	return w.node.PeerCount()
  1567  }
  1568  
  1569  func (w *Waku) Peers() types.PeerStats {
  1570  	return FormatPeerStats(w.node)
  1571  }
  1572  
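        // RelayPeersByTopic lists the gossipsub mesh peers and all known peers for the
        // given pubsub topic. It is only available when running in relay mode.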
  1573  func (w *Waku) RelayPeersByTopic(topic string) (*types.PeerList, error) {
  1574  	if w.cfg.LightClient {
  1575  		return nil, errors.New("only available in relay mode")
  1576  	}
  1577  
  1578  	return &types.PeerList{
  1579  		FullMeshPeers: w.node.Relay().PubSub().MeshPeers(topic),
  1580  		AllPeers:      w.node.Relay().PubSub().ListPeers(topic),
  1581  	}, nil
  1582  }
  1583  
  1584  func (w *Waku) ListenAddresses() []multiaddr.Multiaddr {
  1585  	return w.node.ListenAddresses()
  1586  }
  1587  
  1588  func (w *Waku) ENR() (*enode.Node, error) {
  1589  	enr := w.node.ENR()
  1590  	if enr == nil {
  1591  		return nil, errors.New("enr not available")
  1592  	}
  1593  
  1594  	return enr, nil
  1595  }
  1596  
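        // SubscribeToPubsubTopic subscribes the relay to the given pubsub topic,
        // optionally protected by the given public key. It is a no-op for light clients.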
  1597  func (w *Waku) SubscribeToPubsubTopic(topic string, pubkey *ecdsa.PublicKey) error {
  1598  	topic = w.GetPubsubTopic(topic)
  1599  
  1600  	if !w.cfg.LightClient {
  1601  		err := w.subscribeToPubsubTopicWithWakuRelay(topic, pubkey)
  1602  		if err != nil {
  1603  			return err
  1604  		}
  1605  	}
  1606  	return nil
  1607  }
  1608  
  1609  func (w *Waku) UnsubscribeFromPubsubTopic(topic string) error {
  1610  	topic = w.GetPubsubTopic(topic)
  1611  
  1612  	if !w.cfg.LightClient {
  1613  		err := w.unsubscribeFromPubsubTopicWithWakuRelay(topic)
  1614  		if err != nil {
  1615  			return err
  1616  		}
  1617  	}
  1618  	return nil
  1619  }
  1620  
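        // RetrievePubsubTopicKey returns the stored private key for a protected pubsub
        // topic, or nil when no protected topic store is configured.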
  1621  func (w *Waku) RetrievePubsubTopicKey(topic string) (*ecdsa.PrivateKey, error) {
  1622  	topic = w.GetPubsubTopic(topic)
  1623  	if w.protectedTopicStore == nil {
  1624  		return nil, nil
  1625  	}
  1626  
  1627  	return w.protectedTopicStore.FetchPrivateKey(topic)
  1628  }
  1629  
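        // StorePubsubTopicKey persists the private key for a protected pubsub topic.
        // It is a no-op when no protected topic store is configured.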
  1630  func (w *Waku) StorePubsubTopicKey(topic string, privKey *ecdsa.PrivateKey) error {
  1631  	topic = w.GetPubsubTopic(topic)
  1632  	if w.protectedTopicStore == nil {
  1633  		return nil
  1634  	}
  1635  
  1636  	return w.protectedTopicStore.Insert(topic, privKey, &privKey.PublicKey)
  1637  }
  1638  
  1639  func (w *Waku) RemovePubsubTopicKey(topic string) error {
  1640  	topic = w.GetPubsubTopic(topic)
  1641  	if w.protectedTopicStore == nil {
  1642  		return nil
  1643  	}
  1644  
  1645  	return w.protectedTopicStore.Delete(topic)
  1646  }
  1647  
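        // StartDiscV5 starts the discv5 peer discovery protocol; it fails if discv5 was not set up.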
  1648  func (w *Waku) StartDiscV5() error {
  1649  	if w.node.DiscV5() == nil {
  1650  		return errors.New("discv5 is not setup")
  1651  	}
  1652  
  1653  	return w.node.DiscV5().Start(w.ctx)
  1654  }
  1655  
  1656  func (w *Waku) StopDiscV5() error {
  1657  	if w.node.DiscV5() == nil {
  1658  		return errors.New("discv5 is not setup")
  1659  	}
  1660  
  1661  	w.node.DiscV5().Stop()
  1662  	return nil
  1663  }
  1664  
  1665  func (w *Waku) handleNetworkChangeFromApp(state connection.State) {
  1666  	// If the connection state is reported by something other than the peer count dropping to 0 (e.g. by the mobile app), disconnect all peers.
  1667  	if (state.Offline && len(w.node.Host().Network().Peers()) > 0) ||
  1668  		(w.state.Type != state.Type && !w.state.Offline && !state.Offline) { // network switched between wifi and cellular
  1669  		w.logger.Info("connection switched or offline detected via mobile, disconnecting all peers")
  1670  		w.node.DisconnectAllPeers()
  1671  		if w.cfg.LightClient {
  1672  			w.filterManager.NetworkChange()
  1673  		}
  1674  	}
  1675  }
  1676  
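        // ConnectionChanged reacts to connectivity changes reported by the application.
        // Light clients delegate the change to the filter manager, while relay nodes
        // trigger peer discovery and signal the bootnode seeding loop when going online.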
  1677  func (w *Waku) ConnectionChanged(state connection.State) {
  1678  	isOnline := !state.Offline
  1679  	if w.cfg.LightClient {
  1680  		// TODO: Update this as per https://github.com/waku-org/go-waku/issues/1114
  1681  		go w.filterManager.OnConnectionStatusChange("", isOnline)
  1682  		w.handleNetworkChangeFromApp(state)
  1683  	} else {
  1684  		// For light clients, state updates and online changes are handled in filterManager.
  1685  		// going online
  1686  		if isOnline && !w.onlineChecker.IsOnline() {
  1687  			// TODO: analyze whether we need to discover and connect to peers for relay.
  1688  			w.discoverAndConnectPeers()
  1689  			select {
  1690  			case w.goingOnline <- struct{}{}:
  1691  			default:
  1692  				w.logger.Warn("could not write on connection changed channel")
  1693  			}
  1694  		}
  1695  		// update state
  1696  		w.onlineChecker.SetOnline(isOnline)
  1697  	}
  1698  	w.state = state
  1699  }
  1700  
  1701  // seedBootnodesForDiscV5 periodically tries to fetch bootnodes
  1702  // from an ENR and restart discv5 with them.
  1703  // It backs off exponentially until bootnodesMaxRetries, at which point the retry counter restarts from 0.
  1704  // It also retries immediately when a connection change is signalled by the client.
  1705  func (w *Waku) seedBootnodesForDiscV5() {
  1706  	defer w.wg.Done()
  1707  
  1708  	if !w.cfg.EnableDiscV5 || w.node.DiscV5() == nil {
  1709  		return
  1710  	}
  1711  
  1712  	ticker := time.NewTicker(500 * time.Millisecond)
  1713  	defer ticker.Stop()
  1714  	var retries = 0
  1715  
  1716  	now := func() int64 {
  1717  		return time.Now().UnixNano() / int64(time.Millisecond)
  1718  	}
  1720  
  1721  	var lastTry = now()
  1722  
  1723  	canQuery := func() bool {
  1724  		backoff := bootnodesQueryBackoffMs * int64(math.Exp2(float64(retries)))
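        		// Illustrative arithmetic: with retries = 3 the next query is allowed only
        		// after bootnodesQueryBackoffMs * 2^3, i.e. eight times the base backoff.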
  1725  
  1726  		return lastTry+backoff < now()
  1727  	}
  1728  
  1729  	for {
  1730  		select {
  1731  		case <-ticker.C:
  1732  			if w.seededBootnodesForDiscV5 && len(w.node.Host().Network().Peers()) > 3 {
  1733  				w.logger.Debug("not querying bootnodes", zap.Bool("seeded", w.seededBootnodesForDiscV5), zap.Int("peer-count", len(w.node.Host().Network().Peers())))
  1734  				continue
  1735  			}
  1736  			if canQuery() {
  1737  				w.logger.Info("querying bootnodes to restore connectivity", zap.Int("peer-count", len(w.node.Host().Network().Peers())))
  1738  				err := w.restartDiscV5()
  1739  				if err != nil {
  1740  					w.logger.Warn("failed to restart discv5", zap.Error(err))
  1741  				}
  1742  
  1743  				lastTry = now()
  1744  				retries++
  1745  				// Reset the retry counter once it exceeds bootnodesMaxRetries
  1746  				if retries > bootnodesMaxRetries {
  1747  					retries = 0
  1748  				}
  1750  			} else {
  1751  				w.logger.Info("can't query bootnodes", zap.Int("peer-count", len(w.node.Host().Network().Peers())), zap.Int64("lastTry", lastTry), zap.Int64("now", now()), zap.Int64("backoff", bootnodesQueryBackoffMs*int64(math.Exp2(float64(retries)))), zap.Int("retries", retries))
  1753  			}
  1754  		// If we go online, trigger immediately
  1755  		case <-w.goingOnline:
  1756  			if w.cfg.EnableDiscV5 {
  1757  				if canQuery() {
  1758  					err := w.restartDiscV5()
  1759  					if err != nil {
  1760  						w.logger.Warn("failed to restart discv5", zap.Error(err))
  1761  					}
  1763  				}
  1764  				retries = 0
  1765  				lastTry = now()
  1766  			}
  1767  
  1768  		case <-w.ctx.Done():
  1769  			w.logger.Debug("bootnode seeding stopped")
  1770  			return
  1771  		}
  1772  	}
  1773  }
  1774  
  1775  // restartDiscV5 restarts discv5, re-retrieving the bootstrap nodes.
  1776  func (w *Waku) restartDiscV5() error {
  1777  	ctx, cancel := context.WithTimeout(w.ctx, 30*time.Second)
  1778  	defer cancel()
  1779  	bootnodes, err := w.getDiscV5BootstrapNodes(ctx, w.discV5BootstrapNodes)
  1780  	if err != nil {
  1781  		return err
  1782  	}
  1783  	if len(bootnodes) == 0 {
  1784  		return errors.New("failed to fetch bootnodes")
  1785  	}
  1786  
  1787  	if w.node.DiscV5().ErrOnNotRunning() != nil {
  1788  		w.logger.Info("discv5 is not running, starting it")
  1789  		err := w.node.DiscV5().Start(w.ctx)
  1790  		if err != nil {
  1791  			w.logger.Error("Could not start DiscV5", zap.Error(err))
  1792  		}
  1793  	} else {
  1794  		w.node.DiscV5().Stop()
  1795  		w.logger.Info("discv5 was running, restarting it")
  1796  
  1797  		select {
  1798  		case <-w.ctx.Done(): // Don't start discv5 if we are stopping waku
  1799  			return nil
  1800  		default:
  1801  		}
  1802  
  1803  		err := w.node.DiscV5().Start(w.ctx)
  1804  		if err != nil {
  1805  			w.logger.Error("Could not start DiscV5", zap.Error(err))
  1806  		}
  1807  	}
  1808  
  1809  	w.logger.Info("restarting discv5 with nodes", zap.Any("nodes", bootnodes))
  1810  	return w.node.SetDiscV5Bootnodes(bootnodes)
  1811  }
  1812  
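        // AddStorePeer statically adds a peer supporting the store protocol (v3) and returns its peer ID.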
  1813  func (w *Waku) AddStorePeer(address multiaddr.Multiaddr) (peer.ID, error) {
  1814  	peerID, err := w.node.AddPeer(address, wps.Static, w.cfg.DefaultShardedPubsubTopics, store.StoreQueryID_v300)
  1815  	if err != nil {
  1816  		return "", err
  1817  	}
  1818  	return peerID, nil
  1819  }
  1820  
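        // timestamp returns the current time from the node's time source, in nanoseconds.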
  1821  func (w *Waku) timestamp() int64 {
  1822  	return w.timesource.Now().UnixNano()
  1823  }
  1824  
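        // AddRelayPeer statically adds a peer supporting the relay protocol (v2) and returns its peer ID.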
  1825  func (w *Waku) AddRelayPeer(address multiaddr.Multiaddr) (peer.ID, error) {
  1826  	peerID, err := w.node.AddPeer(address, wps.Static, w.cfg.DefaultShardedPubsubTopics, relay.WakuRelayID_v200)
  1827  	if err != nil {
  1828  		return "", err
  1829  	}
  1830  	return peerID, nil
  1831  }
  1832  
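        // DialPeer dials a peer by its multiaddress, bounded by requestTimeout.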
  1833  func (w *Waku) DialPeer(address multiaddr.Multiaddr) error {
  1834  	ctx, cancel := context.WithTimeout(w.ctx, requestTimeout)
  1835  	defer cancel()
  1836  	return w.node.DialPeerWithMultiAddress(ctx, address)
  1837  }
  1838  
  1839  func (w *Waku) DialPeerByID(peerID peer.ID) error {
  1840  	ctx, cancel := context.WithTimeout(w.ctx, requestTimeout)
  1841  	defer cancel()
  1842  	return w.node.DialPeerByID(ctx, peerID)
  1843  }
  1844  
  1845  func (w *Waku) DropPeer(peerID peer.ID) error {
  1846  	return w.node.ClosePeerById(peerID)
  1847  }
  1848  
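        // ProcessingP2PMessages reports whether store messages are still waiting to be matched by a filter.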
  1849  func (w *Waku) ProcessingP2PMessages() bool {
  1850  	w.storeMsgIDsMu.Lock()
  1851  	defer w.storeMsgIDsMu.Unlock()
  1852  	return len(w.storeMsgIDs) != 0
  1853  }
  1854  
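        // MarkP2PMessageAsProcessed removes a store message from the tracking map once the client has handled it.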
  1855  func (w *Waku) MarkP2PMessageAsProcessed(hash gethcommon.Hash) {
  1856  	w.storeMsgIDsMu.Lock()
  1857  	defer w.storeMsgIDsMu.Unlock()
  1858  	delete(w.storeMsgIDs, hash)
  1859  }
  1860  
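        // Clean drops any queued messages and resets the message store of every installed filter.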
  1861  func (w *Waku) Clean() error {
  1862  	w.msgQueue = make(chan *common.ReceivedMessage, messageQueueLimit)
  1863  
  1864  	for _, f := range w.filters.All() {
  1865  		f.Messages = common.NewMemoryMessageStore()
  1866  	}
  1867  
  1868  	return nil
  1869  }
  1870  
  1871  func (w *Waku) PeerID() peer.ID {
  1872  	return w.node.Host().ID()
  1873  }
  1874  
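        // PingPeer sends a libp2p ping to the given peer and returns the measured round-trip time.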
  1875  func (w *Waku) PingPeer(ctx context.Context, peerID peer.ID) (time.Duration, error) {
  1876  	pingResultCh := ping.Ping(ctx, w.node.Host(), peerID)
  1877  	select {
  1878  	case <-ctx.Done():
  1879  		return 0, ctx.Err()
  1880  	case r := <-pingResultCh:
  1881  		if r.Error != nil {
  1882  			return 0, r.Error
  1883  		}
  1884  		return r.RTT, nil
  1885  	}
  1886  }
  1887  
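        // The following is a minimal usage sketch, not part of the original file: it
        // shows how PingPeer might be combined with a bounded deadline. The method
        // name pingExample and the five-second timeout are illustrative only.
        func (w *Waku) pingExample(peerID peer.ID) {
        	ctx, cancel := context.WithTimeout(w.ctx, 5*time.Second)
        	defer cancel()
        	rtt, err := w.PingPeer(ctx, peerID)
        	if err != nil {
        		w.logger.Warn("ping failed", zap.Error(err))
        		return
        	}
        	w.logger.Debug("ping succeeded", zap.Duration("rtt", rtt))
        }
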
  1888  func (w *Waku) Peerstore() peerstore.Peerstore {
  1889  	return w.node.Host().Peerstore()
  1890  }
  1891  
  1892  // validatePrivateKey checks the format of the given private key.
  1893  func validatePrivateKey(k *ecdsa.PrivateKey) bool {
  1894  	if k == nil || k.D == nil || k.D.Sign() == 0 {
  1895  		return false
  1896  	}
  1897  	return common.ValidatePublicKey(&k.PublicKey)
  1898  }
  1899  
  1900  // makeDeterministicID generates a deterministic ID, based on a given input
  1901  func makeDeterministicID(input string, keyLen int) (id string, err error) {
  1902  	buf := pbkdf2.Key([]byte(input), nil, 4096, keyLen, sha256.New)
  1903  	if !common.ValidateDataIntegrity(buf, common.KeyIDSize) {
  1904  		return "", fmt.Errorf("error in makeDeterministicID: failed to generate key")
  1905  	}
  1906  	id = gethcommon.Bytes2Hex(buf)
  1907  	return id, err
  1908  }
  1909  
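        // A minimal usage sketch (illustrative only, assuming keyLen equals
        // common.KeyIDSize): the same input always yields the same hex-encoded ID.
        //
        //	id, err := makeDeterministicID("some-key-alias", common.KeyIDSize)
        //	// on success, id is a hex string of 2*common.KeyIDSize characters
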
  1910  // toDeterministicID inspects the incoming id and transforms it into the format
  1911  // expected internally by the private key store. Originally, public keys
  1912  // were used as keys; now random keys are used. To make the API easier to
  1913  // consume, both random IDs and public keys are accepted.
  1915  func toDeterministicID(id string, expectedLen int) (string, error) {
  1916  	if len(id) != (expectedLen * 2) { // we received hex key, so number of chars in id is doubled
  1917  		var err error
  1918  		id, err = makeDeterministicID(id, expectedLen)
  1919  		if err != nil {
  1920  			return "", err
  1921  		}
  1922  	}
  1923  
  1924  	return id, nil
  1925  }
  1926  
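        // FormatPeerStats converts the node's peer statistics into types.PeerStats,
        // encapsulating each peer's known addresses with its peer ID.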
  1927  func FormatPeerStats(wakuNode *node.WakuNode) types.PeerStats {
  1928  	p := make(types.PeerStats)
  1929  	for k, v := range wakuNode.PeerStats() {
  1930  		p[k] = types.WakuV2Peer{
  1931  			Addresses: utils.EncapsulatePeerID(k, wakuNode.Host().Peerstore().PeerInfo(k).Addrs...),
  1932  			Protocols: v,
  1933  		}
  1934  	}
  1935  	return p
  1936  }
  1937  
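        // StoreNode returns the store protocol (v3) client of the underlying node.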
  1938  func (w *Waku) StoreNode() *store.WakuStore {
  1939  	return w.node.Store()
  1940  }
  1941  
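        // FormatPeerConnFailures returns the number of recorded connection failures per
        // connected peer, omitting peers without failures.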
  1942  func FormatPeerConnFailures(wakuNode *node.WakuNode) map[string]int {
  1943  	p := make(map[string]int)
  1944  	for _, peerID := range wakuNode.Host().Network().Peers() {
  1945  		peerInfo := wakuNode.Host().Peerstore().PeerInfo(peerID)
  1946  		connFailures := wakuNode.Host().Peerstore().(wps.WakuPeerstore).ConnFailures(peerInfo.ID)
  1947  		if connFailures > 0 {
  1948  			p[peerID.String()] = connFailures
  1949  		}
  1950  	}
  1951  	return p
  1952  }
  1953  
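        // LegacyStoreNode returns the legacy store protocol client of the underlying node.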
  1954  func (w *Waku) LegacyStoreNode() legacy_store.Store {
  1955  	return w.node.LegacyStore()
  1956  }