github.com/decred/dcrlnd@v0.7.6/discovery/sync_manager_test.go

package discovery

import (
	"fmt"
	"reflect"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/davecgh/go-spew/spew"
	"github.com/decred/dcrd/dcrec/secp256k1/v4"
	"github.com/decred/dcrlnd/lntest/wait"
	"github.com/decred/dcrlnd/lnwire"
	"github.com/decred/dcrlnd/routing/route"
	"github.com/decred/dcrlnd/ticker"
	"github.com/stretchr/testify/require"
)

// randPeer creates a random peer.
func randPeer(t *testing.T, quit chan struct{}) *mockPeer {
	t.Helper()

	pk := randPubKey(t)
	return peerWithPubkey(pk, quit)
}

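// peerWithPubkey creates a mock peer with the given public key and quit
// channel.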
func peerWithPubkey(pk *secp256k1.PublicKey, quit chan struct{}) *mockPeer {
	return &mockPeer{
		pk:       pk,
		sentMsgs: make(chan lnwire.Message),
		quit:     quit,
	}
}

// newTestSyncManager creates a new test SyncManager using mock implementations
// of its dependencies.
func newTestSyncManager(numActiveSyncers int) *SyncManager {
	return newPinnedTestSyncManager(numActiveSyncers, nil)
}

// newPinnedTestSyncManager creates a new test SyncManager with a set of pinned
// syncers using mock implementations of its dependencies.
func newPinnedTestSyncManager(numActiveSyncers int,
	pinnedSyncers PinnedSyncers) *SyncManager {

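	// Seed the mock channel graph time series at the latest known height.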
	hID := lnwire.ShortChannelID{BlockHeight: latestKnownHeight}
	return newSyncManager(&SyncManagerCfg{
		ChanSeries:           newMockChannelGraphTimeSeries(hID),
		GossiperState:        newMockGossipState(),
		RotateTicker:         ticker.NewForce(DefaultSyncerRotationInterval),
		HistoricalSyncTicker: ticker.NewForce(DefaultHistoricalSyncInterval),
		NumActiveSyncers:     numActiveSyncers,
		BestHeight: func() uint32 {
			return latestKnownHeight
		},
		PinnedSyncers: pinnedSyncers,
	})
}

// TestSyncManagerNumActiveSyncers ensures that we are unable to have more than
// NumActiveSyncers active syncers.
func TestSyncManagerNumActiveSyncers(t *testing.T) {
	t.Parallel()

	// We'll start by creating our test sync manager which will hold up to
	// 3 active syncers.
	const numActiveSyncers = 3
	const numPinnedSyncers = 3
	const numInactiveSyncers = 1

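	// Construct the set of pinned syncers, keeping their pubkeys around so
	// the corresponding peers can be connected below.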
	pinnedSyncers := make(PinnedSyncers)
	pinnedPubkeys := make(map[route.Vertex]*secp256k1.PublicKey)
	for i := 0; i < numPinnedSyncers; i++ {
		pubkey := randPubKey(t)
		vertex := route.NewVertex(pubkey)

		pinnedSyncers[vertex] = struct{}{}
		pinnedPubkeys[vertex] = pubkey
	}

	syncMgr := newPinnedTestSyncManager(numActiveSyncers, pinnedSyncers)
	syncMgr.Start()
	defer syncMgr.Stop()

	// First we'll start by adding the pinned syncers. These should
	// immediately be assigned PinnedSync.
	for _, pubkey := range pinnedPubkeys {
		peer := peerWithPubkey(pubkey, syncMgr.quit)
		err := syncMgr.InitSyncState(peer)
		require.NoError(t, err)

		s := assertSyncerExistence(t, syncMgr, peer)
		assertTransitionToChansSynced(t, s, peer)
		assertActiveGossipTimestampRange(t, peer)
		assertSyncerStatus(t, s, chansSynced, PinnedSync)
	}

	// We'll go ahead and create our syncers. We'll gather the ones which
	// should be active and passive to check them later on. The pinned peers
	// added above should not influence the active syncer count.
	for i := 0; i < numActiveSyncers; i++ {
		peer := randPeer(t, syncMgr.quit)
		err := syncMgr.InitSyncState(peer)
		require.NoError(t, err)

		s := assertSyncerExistence(t, syncMgr, peer)

		// The first syncer registered always attempts a historical
		// sync.
		if i == 0 {
			assertTransitionToChansSynced(t, s, peer)
		}
		assertActiveGossipTimestampRange(t, peer)
		assertSyncerStatus(t, s, chansSynced, ActiveSync)
	}

	for i := 0; i < numInactiveSyncers; i++ {
		peer := randPeer(t, syncMgr.quit)
		err := syncMgr.InitSyncState(peer)
		require.NoError(t, err)

		s := assertSyncerExistence(t, syncMgr, peer)
		assertSyncerStatus(t, s, chansSynced, PassiveSync)
	}
}

// TestSyncManagerNewActiveSyncerAfterDisconnect ensures that we can regain an
// active syncer after losing one due to the peer disconnecting.
func TestSyncManagerNewActiveSyncerAfterDisconnect(t *testing.T) {
	t.Parallel()

	// We'll create our test sync manager to have two active syncers.
	syncMgr := newTestSyncManager(2)
	syncMgr.Start()
	defer syncMgr.Stop()

	// The first will be an active syncer that performs a historical sync
	// since it is the first one registered with the SyncManager.
	historicalSyncPeer := randPeer(t, syncMgr.quit)
	syncMgr.InitSyncState(historicalSyncPeer)
	historicalSyncer := assertSyncerExistence(t, syncMgr, historicalSyncPeer)
	assertTransitionToChansSynced(t, historicalSyncer, historicalSyncPeer)
	assertActiveGossipTimestampRange(t, historicalSyncPeer)
	assertSyncerStatus(t, historicalSyncer, chansSynced, ActiveSync)

	// Then, we'll create the second active syncer, which is the one we'll
	// disconnect.
	activeSyncPeer := randPeer(t, syncMgr.quit)
	syncMgr.InitSyncState(activeSyncPeer)
	activeSyncer := assertSyncerExistence(t, syncMgr, activeSyncPeer)
	assertActiveGossipTimestampRange(t, activeSyncPeer)
	assertSyncerStatus(t, activeSyncer, chansSynced, ActiveSync)

	// It will then be torn down to simulate a disconnection. Since there
	// are no other candidate syncers available, the active syncer won't be
	// replaced.
	syncMgr.PruneSyncState(activeSyncPeer.PubKey())

	// Then, we'll start our active syncer again, but this time we'll also
	// have a passive syncer available to replace the active syncer after
	// the peer disconnects.
	syncMgr.InitSyncState(activeSyncPeer)
	activeSyncer = assertSyncerExistence(t, syncMgr, activeSyncPeer)
	assertActiveGossipTimestampRange(t, activeSyncPeer)
	assertSyncerStatus(t, activeSyncer, chansSynced, ActiveSync)

	// Create our second peer, which should be initialized as a passive
	// syncer.
	newActiveSyncPeer := randPeer(t, syncMgr.quit)
	syncMgr.InitSyncState(newActiveSyncPeer)
	newActiveSyncer := assertSyncerExistence(t, syncMgr, newActiveSyncPeer)
	assertSyncerStatus(t, newActiveSyncer, chansSynced, PassiveSync)

	// Disconnect our active syncer, which should trigger the SyncManager to
	// replace it with our passive syncer.
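	// PruneSyncState runs in a goroutine here since the promoted passive
	// syncer sends a message that must be consumed for the call to return.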
	go syncMgr.PruneSyncState(activeSyncPeer.PubKey())
	assertPassiveSyncerTransition(t, newActiveSyncer, newActiveSyncPeer)
}

// TestSyncManagerRotateActiveSyncerCandidate tests that we can successfully
// rotate our active syncers after a certain interval.
func TestSyncManagerRotateActiveSyncerCandidate(t *testing.T) {
	t.Parallel()

	// We'll create our sync manager with one active syncer.
	syncMgr := newTestSyncManager(1)
	syncMgr.Start()
	defer syncMgr.Stop()

	// The first syncer registered always performs a historical sync.
	activeSyncPeer := randPeer(t, syncMgr.quit)
	syncMgr.InitSyncState(activeSyncPeer)
	activeSyncer := assertSyncerExistence(t, syncMgr, activeSyncPeer)
	assertTransitionToChansSynced(t, activeSyncer, activeSyncPeer)
	assertActiveGossipTimestampRange(t, activeSyncPeer)
	assertSyncerStatus(t, activeSyncer, chansSynced, ActiveSync)

	// We'll send a tick to force a rotation. Since there aren't any
	// candidates, none of the active syncers will be rotated.
	syncMgr.cfg.RotateTicker.(*ticker.Force).Force <- time.Time{}
	assertNoMsgSent(t, activeSyncPeer)
	assertSyncerStatus(t, activeSyncer, chansSynced, ActiveSync)

	// We'll then go ahead and add a passive syncer.
	passiveSyncPeer := randPeer(t, syncMgr.quit)
	syncMgr.InitSyncState(passiveSyncPeer)
	passiveSyncer := assertSyncerExistence(t, syncMgr, passiveSyncPeer)
	assertSyncerStatus(t, passiveSyncer, chansSynced, PassiveSync)

	// We'll force another rotation - this time, since we have a passive
	// syncer available, they should be rotated.
	syncMgr.cfg.RotateTicker.(*ticker.Force).Force <- time.Time{}

	// The transition from an active syncer to a passive syncer causes the
	// peer to send out a new GossipTimestampRange in the past so that they
	// don't receive new graph updates.
	assertActiveSyncerTransition(t, activeSyncer, activeSyncPeer)

	// The transition from a passive syncer to an active syncer causes the
	// peer to send a new GossipTimestampRange with the current timestamp to
	// signal that they would like to receive new graph updates from their
	// peers. This will also cause the gossip syncer to redo its state
	// machine, starting from its initial syncingChans state. We'll then
	// need to transition it to its final chansSynced state to ensure the
	// next syncer is properly started in the round-robin.
	assertPassiveSyncerTransition(t, passiveSyncer, passiveSyncPeer)
}

// TestSyncManagerNoInitialHistoricalSync ensures no initial sync is attempted
// when NumActiveSyncers is set to 0.
func TestSyncManagerNoInitialHistoricalSync(t *testing.T) {
	t.Parallel()

	syncMgr := newTestSyncManager(0)
	syncMgr.Start()
	defer syncMgr.Stop()

	// We should not expect any messages from the peer.
	peer := randPeer(t, syncMgr.quit)
	err := syncMgr.InitSyncState(peer)
	require.NoError(t, err)
	assertNoMsgSent(t, peer)

	// Force the historical syncer to tick. This shouldn't happen normally
	// since the ticker is never started. However, we will test that even if
	// this were to occur, a historical sync does not progress.
	syncMgr.cfg.HistoricalSyncTicker.(*ticker.Force).Force <- time.Time{}

	assertNoMsgSent(t, peer)
	s := assertSyncerExistence(t, syncMgr, peer)
	assertSyncerStatus(t, s, chansSynced, PassiveSync)
}

// TestSyncManagerInitialHistoricalSync ensures that we only attempt a single
// historical sync during the SyncManager's startup. If the peer corresponding
// to the initial historical syncer disconnects, we should attempt to find a
// replacement.
func TestSyncManagerInitialHistoricalSync(t *testing.T) {
	t.Parallel()

	syncMgr := newTestSyncManager(1)

	// The graph should not be considered as synced since the sync manager
	// has yet to start.
	if syncMgr.IsGraphSynced() {
		t.Fatal("expected graph to not be considered as synced")
	}

	syncMgr.Start()
	defer syncMgr.Stop()

	// We should expect to see a QueryChannelRange message with a
	// FirstBlockHeight of the genesis block, signaling that an initial
	// historical sync is being attempted.
	peer := randPeer(t, syncMgr.quit)
	syncMgr.InitSyncState(peer)
	assertMsgSent(t, peer, &lnwire.QueryChannelRange{
		FirstBlockHeight: 0,
		NumBlocks:        latestKnownHeight,
	})

	// The graph should not be considered as synced since the initial
	// historical sync has not finished.
	if syncMgr.IsGraphSynced() {
		t.Fatal("expected graph to not be considered as synced")
	}

	// If an additional peer connects, then another historical sync should
	// not be attempted.
	finalHistoricalPeer := randPeer(t, syncMgr.quit)
	syncMgr.InitSyncState(finalHistoricalPeer)
	finalHistoricalSyncer := assertSyncerExistence(t, syncMgr, finalHistoricalPeer)
	assertNoMsgSent(t, finalHistoricalPeer)

	// If we disconnect the peer performing the initial historical sync, a
	// new one should be chosen.
	syncMgr.PruneSyncState(peer.PubKey())

	// Complete the initial historical sync by transitioning the syncer to
	// its final chansSynced state. The graph should be considered as synced
	// after the fact.
	assertTransitionToChansSynced(t, finalHistoricalSyncer, finalHistoricalPeer)
	if !syncMgr.IsGraphSynced() {
		t.Fatal("expected graph to be considered as synced")
	}
	// The historical syncer should be active after the sync completes.
	assertActiveGossipTimestampRange(t, finalHistoricalPeer)

	// Once the initial historical sync has succeeded, disconnecting the
	// peer who performed it should not trigger another one.
	extraPeer := randPeer(t, syncMgr.quit)
	syncMgr.InitSyncState(extraPeer)

	// Pruning the first peer will cause the passive peer to send an active
	// gossip timestamp msg, which we must consume asynchronously for the
	// call to return.
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		assertActiveGossipTimestampRange(t, extraPeer)
	}()
	syncMgr.PruneSyncState(finalHistoricalPeer.PubKey())
	wg.Wait()

	// No further messages should be sent.
	assertNoMsgSent(t, extraPeer)
}

// TestSyncManagerHistoricalSyncOnReconnect tests that the sync manager will
// re-trigger a historical sync when a new peer connects after a historical
// sync has completed, but we have lost all peers.
func TestSyncManagerHistoricalSyncOnReconnect(t *testing.T) {
	t.Parallel()

	syncMgr := newTestSyncManager(2)
	syncMgr.Start()
	defer syncMgr.Stop()

	// We should expect to see a QueryChannelRange message with a
	// FirstBlockHeight of the genesis block, signaling that an initial
	// historical sync is being attempted.
	peer := randPeer(t, syncMgr.quit)
	syncMgr.InitSyncState(peer)
	s := assertSyncerExistence(t, syncMgr, peer)
	assertTransitionToChansSynced(t, s, peer)
	assertActiveGossipTimestampRange(t, peer)
	assertSyncerStatus(t, s, chansSynced, ActiveSync)

	// Now that the historical sync is completed, we prune the syncer,
	// simulating all peers having disconnected.
	syncMgr.PruneSyncState(peer.PubKey())

	// If a new peer now connects, then another historical sync should
	// be attempted. This is to ensure we get an up-to-date graph if we
	// haven't had any peers for a time.
	nextPeer := randPeer(t, syncMgr.quit)
	syncMgr.InitSyncState(nextPeer)
	s1 := assertSyncerExistence(t, syncMgr, nextPeer)
	assertTransitionToChansSynced(t, s1, nextPeer)
	assertActiveGossipTimestampRange(t, nextPeer)
	assertSyncerStatus(t, s1, chansSynced, ActiveSync)
}

// TestSyncManagerForceHistoricalSync ensures that we can perform routine
// historical syncs whenever the HistoricalSyncTicker fires.
func TestSyncManagerForceHistoricalSync(t *testing.T) {
	t.Parallel()

	syncMgr := newTestSyncManager(1)
	syncMgr.Start()
	defer syncMgr.Stop()

	// We should expect to see a QueryChannelRange message with a
	// FirstBlockHeight of the genesis block, signaling that a historical
	// sync is being attempted.
	peer := randPeer(t, syncMgr.quit)
	syncMgr.InitSyncState(peer)
	assertMsgSent(t, peer, &lnwire.QueryChannelRange{
		FirstBlockHeight: 0,
		NumBlocks:        latestKnownHeight,
	})

	// If an additional peer connects, then a historical sync should not be
	// attempted again.
	extraPeer := randPeer(t, syncMgr.quit)
	syncMgr.InitSyncState(extraPeer)
	assertNoMsgSent(t, extraPeer)

	// Then, we'll send a tick to force a historical sync. This should
	// trigger the extra peer to also perform a historical sync since the
	// first peer is not eligible due to not being in a chansSynced state.
	syncMgr.cfg.HistoricalSyncTicker.(*ticker.Force).Force <- time.Time{}
	assertMsgSent(t, extraPeer, &lnwire.QueryChannelRange{
		FirstBlockHeight: 0,
		NumBlocks:        latestKnownHeight,
	})
}

// TestSyncManagerGraphSyncedAfterHistoricalSyncReplacement ensures that the
// sync manager properly marks the graph as synced given that our initial
// historical sync has stalled, but a replacement has fully completed.
func TestSyncManagerGraphSyncedAfterHistoricalSyncReplacement(t *testing.T) {
	t.Parallel()

	syncMgr := newTestSyncManager(1)
	syncMgr.Start()
	defer syncMgr.Stop()

	// We should expect to see a QueryChannelRange message with a
	// FirstBlockHeight of the genesis block, signaling that an initial
	// historical sync is being attempted.
	peer := randPeer(t, syncMgr.quit)
	syncMgr.InitSyncState(peer)
	assertMsgSent(t, peer, &lnwire.QueryChannelRange{
		FirstBlockHeight: 0,
		NumBlocks:        latestKnownHeight,
	})

	// The graph should not be considered as synced since the initial
	// historical sync has not finished.
	if syncMgr.IsGraphSynced() {
		t.Fatal("expected graph to not be considered as synced")
	}

	// If an additional peer connects, then another historical sync should
	// not be attempted.
	finalHistoricalPeer := randPeer(t, syncMgr.quit)
	syncMgr.InitSyncState(finalHistoricalPeer)
	finalHistoricalSyncer := assertSyncerExistence(t, syncMgr, finalHistoricalPeer)
	assertNoMsgSent(t, finalHistoricalPeer)

	// To simulate that our initial historical sync has stalled, we'll force
	// a historical sync with the new peer to ensure it is replaced.
	syncMgr.cfg.HistoricalSyncTicker.(*ticker.Force).Force <- time.Time{}

	// The graph should still not be considered as synced since the
	// replacement historical sync has not finished.
	if syncMgr.IsGraphSynced() {
		t.Fatal("expected graph to not be considered as synced")
	}

	// Complete the replacement historical sync by transitioning the syncer
	// to its final chansSynced state. The graph should be considered as
	// synced after the fact.
	assertTransitionToChansSynced(t, finalHistoricalSyncer, finalHistoricalPeer)
	if !syncMgr.IsGraphSynced() {
		t.Fatal("expected graph to be considered as synced")
	}
}

// TestSyncManagerWaitUntilInitialHistoricalSync ensures that no GossipSyncers
// are initialized as ActiveSync until the initial historical sync has been
// completed. Once it completes, the pending GossipSyncers should be
// transitioned to ActiveSync.
func TestSyncManagerWaitUntilInitialHistoricalSync(t *testing.T) {
	t.Parallel()

	const numActiveSyncers = 2

	// We'll start by creating our test sync manager which will hold up to
	// 2 active syncers.
	syncMgr := newTestSyncManager(numActiveSyncers)
	syncMgr.Start()
	defer syncMgr.Stop()

	// We'll go ahead and create our syncers.
	peers := make([]*mockPeer, 0, numActiveSyncers)
	syncers := make([]*GossipSyncer, 0, numActiveSyncers)
	for i := 0; i < numActiveSyncers; i++ {
		peer := randPeer(t, syncMgr.quit)
		peers = append(peers, peer)

		syncMgr.InitSyncState(peer)
		s := assertSyncerExistence(t, syncMgr, peer)
		syncers = append(syncers, s)

		// The first one always attempts a historical sync. We won't
		// transition it to chansSynced to ensure the remaining syncers
		// aren't started as active.
		if i == 0 {
			assertSyncerStatus(t, s, syncingChans, PassiveSync)
			continue
		}

		// The rest should remain in a passive and chansSynced state,
		// and they should be queued to transition to active once the
		// initial historical sync is completed.
		assertNoMsgSent(t, peer)
		assertSyncerStatus(t, s, chansSynced, PassiveSync)
	}

	// To ensure we don't transition any pending active syncers that have
	// previously disconnected, we'll disconnect the last one.
	stalePeer := peers[numActiveSyncers-1]
	syncMgr.PruneSyncState(stalePeer.PubKey())

	// Then, we'll complete the initial historical sync by transitioning the
	// historical syncer to its final chansSynced state. This should trigger
	// all of the pending active syncers to transition, except for the one
	// we disconnected.
	assertTransitionToChansSynced(t, syncers[0], peers[0])
	for i, s := range syncers {
		if i == numActiveSyncers-1 {
			assertNoMsgSent(t, peers[i])
			continue
		}
		assertPassiveSyncerTransition(t, s, peers[i])
	}
}

// assertNoMsgSent is a helper function that ensures a peer hasn't sent any
// messages.
func assertNoMsgSent(t *testing.T, peer *mockPeer) {
	t.Helper()

	select {
	case msg := <-peer.sentMsgs:
		t.Fatalf("peer %x sent unexpected message %v", peer.PubKey(),
			spew.Sdump(msg))
	case <-time.After(time.Second):
	}
}

// assertMsgSent asserts that the peer has sent the given message.
func assertMsgSent(t *testing.T, peer *mockPeer, msg lnwire.Message) {
	t.Helper()

	var msgSent lnwire.Message
	select {
	case msgSent = <-peer.sentMsgs:
	case <-time.After(time.Second):
		t.Fatalf("expected peer %x to send %T message", peer.PubKey(),
			msg)
	}

	if !reflect.DeepEqual(msgSent, msg) {
		t.Fatalf("expected peer %x to send message: %v\ngot: %v",
			peer.PubKey(), spew.Sdump(msg), spew.Sdump(msgSent))
	}
}

// assertActiveGossipTimestampRange is a helper function that ensures a peer has
// sent a lnwire.GossipTimestampRange message indicating that it would like to
// receive new graph updates.
func assertActiveGossipTimestampRange(t *testing.T, peer *mockPeer) {
	t.Helper()

	var msgSent lnwire.Message
	select {
	case msgSent = <-peer.sentMsgs:
	case <-time.After(2 * time.Second):
		t.Fatalf("expected peer %x to send lnwire.GossipTimestampRange "+
			"message", peer.PubKey())
	}

	msg, ok := msgSent.(*lnwire.GossipTimestampRange)
	if !ok {
		t.Fatalf("expected peer %x to send %T message", peer.PubKey(),
			msg)
	}
	if msg.FirstTimestamp == 0 {
		t.Fatalf("expected *lnwire.GossipTimestampRange message with " +
			"non-zero FirstTimestamp")
	}
	if msg.TimestampRange == 0 {
		t.Fatalf("expected *lnwire.GossipTimestampRange message with " +
			"non-zero TimestampRange")
	}
}

// assertSyncerExistence asserts that a GossipSyncer exists for the given peer.
func assertSyncerExistence(t *testing.T, syncMgr *SyncManager,
	peer *mockPeer) *GossipSyncer {

	t.Helper()

	s, ok := syncMgr.GossipSyncer(peer.PubKey())
	if !ok {
		t.Fatalf("gossip syncer for peer %x not found", peer.PubKey())
	}

	return s
}

// assertSyncerStatus asserts that the gossip syncer for the given peer matches
// the expected sync state and type.
func assertSyncerStatus(t *testing.T, s *GossipSyncer, syncState syncerState,
	syncType SyncerType) {

	t.Helper()

	// We'll check the status of our syncer within a WaitPredicate as some
	// sync transitions might cause this to be racy.
	err := wait.NoError(func() error {
		state := s.syncState()
		if state != syncState {
			return fmt.Errorf("expected syncState %v for peer "+
				"%x, got %v", syncState, s.cfg.peerPub, state)
		}

		typ := s.SyncType()
		if typ != syncType {
			return fmt.Errorf("expected syncType %v for peer "+
				"%x, got %v", syncType, s.cfg.peerPub, typ)
		}

		return nil
	}, time.Second)
	if err != nil {
		t.Fatal(err)
	}
}

// assertTransitionToChansSynced asserts the transition of an ActiveSync
// GossipSyncer to its final chansSynced state.
func assertTransitionToChansSynced(t *testing.T, s *GossipSyncer, peer *mockPeer) {
	t.Helper()

	query := &lnwire.QueryChannelRange{
		FirstBlockHeight: 0,
		NumBlocks:        latestKnownHeight,
	}
	assertMsgSent(t, peer, query)

	require.Eventually(t, func() bool {
		return s.syncState() == waitingQueryRangeReply
	}, time.Second, 500*time.Millisecond)

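	// Answer the syncer's channel range query with a single reply marked
	// complete so it can finish processing the query.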
	require.NoError(t, s.ProcessQueryMsg(&lnwire.ReplyChannelRange{
		ChainHash:        query.ChainHash,
		FirstBlockHeight: query.FirstBlockHeight,
		NumBlocks:        query.NumBlocks,
		Complete:         1,
	}, nil))

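	// The syncer will then ask the channel series to filter the advertised
	// channel IDs against those it already knows of. Reply with an empty
	// set so it can settle into its final chansSynced state.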
	chanSeries := s.cfg.channelSeries.(*mockChannelGraphTimeSeries)

	select {
	case <-chanSeries.filterReq:
		chanSeries.filterResp <- nil
	case <-time.After(2 * time.Second):
		t.Fatal("expected to receive FilterKnownChanIDs request")
	}

	err := wait.NoError(func() error {
		state := syncerState(atomic.LoadUint32(&s.state))
		if state != chansSynced {
			return fmt.Errorf("expected syncerState %v, got %v",
				chansSynced, state)
		}

		return nil
	}, time.Second)
	if err != nil {
		t.Fatal(err)
	}
}

// assertPassiveSyncerTransition asserts that a gossip syncer goes through all
// of its expected steps when transitioning from passive to active.
func assertPassiveSyncerTransition(t *testing.T, s *GossipSyncer, peer *mockPeer) {
	t.Helper()

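	// Promotion to ActiveSync is signaled by a GossipTimestampRange with a
	// recent FirstTimestamp, requesting new graph updates from the peer.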
	assertActiveGossipTimestampRange(t, peer)
	assertSyncerStatus(t, s, chansSynced, ActiveSync)
}

// assertActiveSyncerTransition asserts that a gossip syncer goes through all of
// its expected steps when transitioning from active to passive.
func assertActiveSyncerTransition(t *testing.T, s *GossipSyncer, peer *mockPeer) {
	t.Helper()

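	// Demotion to PassiveSync is signaled by a GossipTimestampRange set in
	// the past with a zero range, halting delivery of new graph updates.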
	assertMsgSent(t, peer, &lnwire.GossipTimestampRange{
		FirstTimestamp: uint32(zeroTimestamp.Unix()),
		TimestampRange: 0,
	})
	assertSyncerStatus(t, s, chansSynced, PassiveSync)
}