github.com/decred/dcrlnd@v0.7.6/discovery/syncer_test.go

     1  package discovery
     2  
     3  import (
     4  	"errors"
     5  	"fmt"
     6  	"math"
     7  	"reflect"
     8  	"sort"
     9  	"sync"
    10  	"testing"
    11  	"time"
    12  
    13  	"github.com/davecgh/go-spew/spew"
    14  	"github.com/decred/dcrd/chaincfg/chainhash"
    15  	"github.com/decred/dcrd/chaincfg/v3"
    16  	"github.com/decred/dcrlnd/channeldb"
    17  	"github.com/decred/dcrlnd/lnwire"
    18  	"github.com/decred/dcrlnd/routing/route"
    19  	"github.com/stretchr/testify/require"
    20  )
    21  
    22  const (
    23  	defaultEncoding   = lnwire.EncodingSortedPlain
    24  	latestKnownHeight = 1337
    25  	startHeight       = latestKnownHeight - chanRangeQueryBuffer
    26  )
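
        // startHeight mirrors how the syncer begins its initial range query:
        // chanRangeQueryBuffer blocks back from the latest known height.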
    27  
    28  var (
    29  	defaultChunkSize = encodingTypeToChunkSize[defaultEncoding]
    30  )
    31  
    32  type horizonQuery struct {
    33  	chain chainhash.Hash
    34  	start time.Time
    35  	end   time.Time
    36  }
    37  type filterRangeReq struct {
    38  	startHeight, endHeight uint32
    39  }
    40  
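        // mockChannelGraphTimeSeries stubs out the ChannelGraphTimeSeries
        // interface for tests: each method forwards its arguments on the matching
        // buffered req channel, then blocks until the test injects a canned reply
        // on the paired resp channel, letting tests observe queries and script
        // their responses.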
    41  type mockChannelGraphTimeSeries struct {
    42  	highestID lnwire.ShortChannelID
    43  
    44  	horizonReq  chan horizonQuery
    45  	horizonResp chan []lnwire.Message
    46  
    47  	filterReq  chan []lnwire.ShortChannelID
    48  	filterResp chan []lnwire.ShortChannelID
    49  
    50  	filterRangeReqs chan filterRangeReq
    51  	filterRangeResp chan []lnwire.ShortChannelID
    52  
    53  	annReq  chan []lnwire.ShortChannelID
    54  	annResp chan []lnwire.Message
    55  
    56  	updateReq  chan lnwire.ShortChannelID
    57  	updateResp chan []*lnwire.ChannelUpdate
    58  }
    59  
    60  func newMockChannelGraphTimeSeries(
    61  	hID lnwire.ShortChannelID) *mockChannelGraphTimeSeries {
    62  
    63  	return &mockChannelGraphTimeSeries{
    64  		highestID: hID,
    65  
    66  		horizonReq:  make(chan horizonQuery, 1),
    67  		horizonResp: make(chan []lnwire.Message, 1),
    68  
    69  		filterReq:  make(chan []lnwire.ShortChannelID, 1),
    70  		filterResp: make(chan []lnwire.ShortChannelID, 1),
    71  
    72  		filterRangeReqs: make(chan filterRangeReq, 1),
    73  		filterRangeResp: make(chan []lnwire.ShortChannelID, 1),
    74  
    75  		annReq:  make(chan []lnwire.ShortChannelID, 1),
    76  		annResp: make(chan []lnwire.Message, 1),
    77  
    78  		updateReq:  make(chan lnwire.ShortChannelID, 1),
    79  		updateResp: make(chan []*lnwire.ChannelUpdate, 1),
    80  	}
    81  }
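
        // Every mock channel is buffered with capacity 1, so a test can pre-load
        // a single response (or later drain a single request) without blocking.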
    82  
    83  func (m *mockChannelGraphTimeSeries) HighestChanID(chain chainhash.Hash) (*lnwire.ShortChannelID, error) {
    84  	return &m.highestID, nil
    85  }
    86  func (m *mockChannelGraphTimeSeries) UpdatesInHorizon(chain chainhash.Hash,
    87  	startTime time.Time, endTime time.Time) ([]lnwire.Message, error) {
    88  
    89  	m.horizonReq <- horizonQuery{
    90  		chain, startTime, endTime,
    91  	}
    92  
    93  	return <-m.horizonResp, nil
    94  }
    95  func (m *mockChannelGraphTimeSeries) FilterKnownChanIDs(chain chainhash.Hash,
    96  	superSet []lnwire.ShortChannelID) ([]lnwire.ShortChannelID, error) {
    97  
    98  	m.filterReq <- superSet
    99  
   100  	return <-m.filterResp, nil
   101  }
   102  func (m *mockChannelGraphTimeSeries) FilterChannelRange(chain chainhash.Hash,
   103  	startHeight, endHeight uint32) ([]channeldb.BlockChannelRange, error) {
   104  
   105  	m.filterRangeReqs <- filterRangeReq{startHeight, endHeight}
   106  	reply := <-m.filterRangeResp
   107  
   108  	channelsPerBlock := make(map[uint32][]lnwire.ShortChannelID)
   109  	for _, cid := range reply {
   110  		channelsPerBlock[cid.BlockHeight] = append(
   111  			channelsPerBlock[cid.BlockHeight], cid,
   112  		)
   113  	}
   114  
   115  	// Return the channel ranges in ascending block height order.
   116  	blocks := make([]uint32, 0, len(channelsPerBlock))
   117  	for block := range channelsPerBlock {
   118  		blocks = append(blocks, block)
   119  	}
   120  	sort.Slice(blocks, func(i, j int) bool {
   121  		return blocks[i] < blocks[j]
   122  	})
   123  
   124  	channelRanges := make([]channeldb.BlockChannelRange, 0, len(channelsPerBlock))
   125  	for _, block := range blocks {
   126  		channelRanges = append(channelRanges, channeldb.BlockChannelRange{
   127  			Height:   block,
   128  			Channels: channelsPerBlock[block],
   129  		})
   130  	}
   131  
   132  	return channelRanges, nil
   133  }
   134  func (m *mockChannelGraphTimeSeries) FetchChanAnns(chain chainhash.Hash,
   135  	shortChanIDs []lnwire.ShortChannelID) ([]lnwire.Message, error) {
   136  
   137  	m.annReq <- shortChanIDs
   138  
   139  	return <-m.annResp, nil
   140  }
   141  func (m *mockChannelGraphTimeSeries) FetchChanUpdates(chain chainhash.Hash,
   142  	shortChanID lnwire.ShortChannelID) ([]*lnwire.ChannelUpdate, error) {
   143  
   144  	m.updateReq <- shortChanID
   145  
   146  	return <-m.updateResp, nil
   147  }
   148  
   149  var _ ChannelGraphTimeSeries = (*mockChannelGraphTimeSeries)(nil)
   150  
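        // mockGossipState tracks, per peer, the timestamp of the last gossip
        // message seen, only ever moving it forward: UpdatePeerLastGossipMsgTS
        // ignores timestamps older than the one already stored.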
   151  type mockGossipState struct {
   152  	sync.Mutex
   153  	timestamps map[route.Vertex]time.Time
   154  }
   155  
   156  func newMockGossipState() *mockGossipState {
   157  	return &mockGossipState{timestamps: make(map[route.Vertex]time.Time)}
   158  }
   159  
   160  func (gs *mockGossipState) UpdatePeerLastGossipMsgTS(peer route.Vertex, ts time.Time) (bool, error) {
   161  	gs.Lock()
   162  	prevTS := gs.timestamps[peer]
   163  	var updated bool
   164  	if !ts.Before(prevTS) {
   165  		gs.timestamps[peer] = ts
   166  		updated = true
   167  	}
   168  	gs.Unlock()
   169  	return updated, nil
   170  }
   171  
   172  func (gs *mockGossipState) ReadPeerLastGossipMsgTS(peer route.Vertex) (time.Time, error) {
   173  	gs.Lock()
   174  	ts, ok := gs.timestamps[peer]
   175  	gs.Unlock()
   176  	if !ok {
   177  		return time.Time{}, fmt.Errorf("no recorded ts")
   178  	}
   179  	return ts, nil
   180  }
   181  
   182  var _ GossiperState = (*mockGossipState)(nil)
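
        // As a quick sketch of those semantics (peer, t0, and t1 here are
        // hypothetical, with t0 earlier than t1):
        //
        //	gs := newMockGossipState()
        //	gs.UpdatePeerLastGossipMsgTS(peer, t1) // stored
        //	gs.UpdatePeerLastGossipMsgTS(peer, t0) // ignored: t0.Before(t1)
        //	ts, _ := gs.ReadPeerLastGossipMsgTS(peer) // ts == t1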
   183  
   184  // newTestSyncer creates a new test instance of a GossipSyncer. A buffered
   185  // message channel is returned for intercepting messages sent from the syncer,
   186  // in addition to a mock channel series which allows the test to control which
   187  // messages the syncer knows of or wishes to filter out. The variadic flags are
   188  // treated as positional arguments where the first index signals that the syncer
    189  // should spawn a channelGraphSyncer and the second index signals that the syncer
   190  // should spawn a replyHandler. Any flags beyond the first two are currently
   191  // ignored. If no flags are provided, both a channelGraphSyncer and replyHandler
   192  // will be spawned by default.
   193  func newTestSyncer(hID lnwire.ShortChannelID,
   194  	encodingType lnwire.ShortChanIDEncoding, chunkSize int32,
   195  	flags ...bool) (chan []lnwire.Message,
   196  	*GossipSyncer, *mockChannelGraphTimeSeries) {
   197  
   198  	syncChannels := true
   199  	replyQueries := true
   200  	if len(flags) > 0 {
   201  		syncChannels = flags[0]
   202  	}
   203  	if len(flags) > 1 {
   204  		replyQueries = flags[1]
   205  	}
   206  
   207  	msgChan := make(chan []lnwire.Message, 20)
   208  	cfg := gossipSyncerCfg{
   209  		channelSeries:  newMockChannelGraphTimeSeries(hID),
   210  		gossiperState:  newMockGossipState(),
   211  		encodingType:   encodingType,
   212  		chunkSize:      chunkSize,
   213  		batchSize:      chunkSize,
   214  		noSyncChannels: !syncChannels,
   215  		noReplyQueries: !replyQueries,
   216  		sendToPeer: func(msgs ...lnwire.Message) error {
   217  			msgChan <- msgs
   218  			return nil
   219  		},
   220  		sendToPeerSync: func(msgs ...lnwire.Message) error {
   221  			msgChan <- msgs
   222  			return nil
   223  		},
   224  		delayedQueryReplyInterval: 2 * time.Second,
   225  		bestHeight: func() uint32 {
   226  			return latestKnownHeight
   227  		},
   228  		markGraphSynced:          func() {},
   229  		maxQueryChanRangeReplies: maxQueryChanRangeReplies,
   230  	}
   231  	syncer := newGossipSyncer(cfg)
   232  
   233  	return msgChan, syncer, cfg.channelSeries.(*mockChannelGraphTimeSeries)
   234  }
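
        // As an illustrative sketch of the variadic flags described above, a test
        // that only wants the channelGraphSyncer half (no replyHandler) passes
        // true, false, as TestGossipSyncerDelayDOS does below:
        //
        //	msgChan, syncer, chanSeries := newTestSyncer(
        //		highestID, defaultEncoding, chunkSize, true, false,
        //	)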
   235  
   236  // TestGossipSyncerFilterGossipMsgsNoHorizon tests that if the remote peer
   237  // doesn't have a horizon set, then we won't send any incoming messages to it.
   238  func TestGossipSyncerFilterGossipMsgsNoHorizon(t *testing.T) {
   239  	t.Parallel()
   240  
   241  	// First, we'll create a GossipSyncer instance with a canned sendToPeer
   242  	// message to allow us to intercept their potential sends.
   243  	msgChan, syncer, _ := newTestSyncer(
   244  		lnwire.NewShortChanIDFromInt(10), defaultEncoding,
   245  		defaultChunkSize,
   246  	)
   247  
   248  	// With the syncer created, we'll create a set of messages to filter
   249  	// through the gossiper to the target peer.
   250  	msgs := []msgWithSenders{
   251  		{
   252  			msg: &lnwire.NodeAnnouncement{Timestamp: uint32(time.Now().Unix())},
   253  		},
   254  		{
   255  			msg: &lnwire.NodeAnnouncement{Timestamp: uint32(time.Now().Unix())},
   256  		},
   257  	}
   258  
   259  	// We'll then attempt to filter the set of messages through the target
   260  	// peer.
   261  	syncer.FilterGossipMsgs(msgs...)
   262  
   263  	// As the remote peer doesn't yet have a gossip timestamp set, we
   264  	// shouldn't receive any outbound messages.
   265  	select {
   266  	case msg := <-msgChan:
   267  		t.Fatalf("received message but shouldn't have: %v",
   268  			spew.Sdump(msg))
   269  
   270  	case <-time.After(time.Millisecond * 10):
   271  	}
   272  }
   273  
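        // unixStamp converts a raw int64 unix time into the uint32 timestamp
        // representation carried in gossip wire messages.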
   274  func unixStamp(a int64) uint32 {
   275  	t := time.Unix(a, 0)
   276  	return uint32(t.Unix())
   277  }
   278  
    279  // TestGossipSyncerFilterGossipMsgsAllInMemory tests that we're able to
    280  // properly filter out a set of incoming messages based on the remote update
    281  // horizon set for a peer. We test all message types and all time straddlings.
    282  // We'll also send a channel ann that already has a channel update on disk.
   283  func TestGossipSyncerFilterGossipMsgsAllInMemory(t *testing.T) {
   284  	t.Parallel()
   285  
   286  	// First, we'll create a GossipSyncer instance with a canned sendToPeer
   287  	// message to allow us to intercept their potential sends.
   288  	msgChan, syncer, chanSeries := newTestSyncer(
   289  		lnwire.NewShortChanIDFromInt(10), defaultEncoding,
   290  		defaultChunkSize,
   291  	)
   292  
    293  	// We'll create and then apply a remote horizon for the target peer with a
   294  	// set of manually selected timestamps.
   295  	remoteHorizon := &lnwire.GossipTimestampRange{
   296  		FirstTimestamp: unixStamp(25000),
   297  		TimestampRange: uint32(1000),
   298  	}
   299  	syncer.remoteUpdateHorizon = remoteHorizon
   300  
   301  	// With the syncer created, we'll create a set of messages to filter
    302  	// through the gossiper to the target peer. Our messages will consist of
   303  	// one node announcement above the horizon, one below. Additionally,
   304  	// we'll include a chan ann with an update below the horizon, one
   305  	// with an update timestamp above the horizon, and one without any
   306  	// channel updates at all.
   307  	msgs := []msgWithSenders{
   308  		{
   309  			// Node ann above horizon.
   310  			msg: &lnwire.NodeAnnouncement{Timestamp: unixStamp(25001)},
   311  		},
   312  		{
   313  			// Node ann below horizon.
   314  			msg: &lnwire.NodeAnnouncement{Timestamp: unixStamp(5)},
   315  		},
   316  		{
   317  			// Node ann above horizon.
   318  			msg: &lnwire.NodeAnnouncement{Timestamp: unixStamp(999999)},
   319  		},
   320  		{
   321  			// Ann tuple below horizon.
   322  			msg: &lnwire.ChannelAnnouncement{
   323  				ShortChannelID: lnwire.NewShortChanIDFromInt(10),
   324  			},
   325  		},
   326  		{
   327  			msg: &lnwire.ChannelUpdate{
   328  				ShortChannelID: lnwire.NewShortChanIDFromInt(10),
   329  				Timestamp:      unixStamp(5),
   330  			},
   331  		},
   332  		{
   333  			// Ann tuple above horizon.
   334  			msg: &lnwire.ChannelAnnouncement{
   335  				ShortChannelID: lnwire.NewShortChanIDFromInt(15),
   336  			},
   337  		},
   338  		{
   339  			msg: &lnwire.ChannelUpdate{
   340  				ShortChannelID: lnwire.NewShortChanIDFromInt(15),
   341  				Timestamp:      unixStamp(25002),
   342  			},
   343  		},
   344  		{
   345  			// Ann tuple beyond horizon.
   346  			msg: &lnwire.ChannelAnnouncement{
   347  				ShortChannelID: lnwire.NewShortChanIDFromInt(20),
   348  			},
   349  		},
   350  		{
   351  			msg: &lnwire.ChannelUpdate{
   352  				ShortChannelID: lnwire.NewShortChanIDFromInt(20),
   353  				Timestamp:      unixStamp(999999),
   354  			},
   355  		},
   356  		{
   357  			// Ann w/o an update at all, the update in the DB will
   358  			// be below the horizon.
   359  			msg: &lnwire.ChannelAnnouncement{
   360  				ShortChannelID: lnwire.NewShortChanIDFromInt(25),
   361  			},
   362  		},
   363  	}
   364  
   365  	// Before we send off the query, we'll ensure we send the missing
   366  	// channel update for that final ann. It will be below the horizon, so
   367  	// shouldn't be sent anyway.
   368  	errCh := make(chan error, 1)
   369  	go func() {
   370  		select {
   371  		case <-time.After(time.Second * 15):
   372  			errCh <- errors.New("no query received")
   373  			return
   374  		case query := <-chanSeries.updateReq:
   375  			// It should be asking for the chan updates of short
   376  			// chan ID 25.
   377  			expectedID := lnwire.NewShortChanIDFromInt(25)
   378  			if expectedID != query {
   379  				errCh <- fmt.Errorf("wrong query id: expected %v, got %v",
   380  					expectedID, query)
   381  				return
   382  			}
   383  
   384  			// If so, then we'll send back the missing update.
   385  			chanSeries.updateResp <- []*lnwire.ChannelUpdate{
   386  				{
   387  					ShortChannelID: lnwire.NewShortChanIDFromInt(25),
   388  					Timestamp:      unixStamp(5),
   389  				},
   390  			}
   391  			errCh <- nil
   392  		}
   393  	}()
   394  
   395  	// We'll then instruct the gossiper to filter this set of messages.
   396  	syncer.FilterGossipMsgs(msgs...)
   397  
    398  	// Out of all the messages we sent in, we should only get 3 of them
   399  	// back.
   400  	select {
   401  	case <-time.After(time.Second * 15):
   402  		t.Fatalf("no msgs received")
   403  
   404  	case msgs := <-msgChan:
   405  		if len(msgs) != 3 {
   406  			t.Fatalf("expected 3 messages instead got %v "+
   407  				"messages: %v", len(msgs), spew.Sdump(msgs))
   408  		}
   409  	}
   410  
   411  	// Wait for error from goroutine.
   412  	select {
   413  	case <-time.After(time.Second * 30):
   414  		t.Fatalf("goroutine did not return within 30 seconds")
   415  	case err := <-errCh:
   416  		if err != nil {
   417  			t.Fatal(err)
   418  		}
   419  	}
   420  }
   421  
   422  // TestGossipSyncerApplyNoHistoricalGossipFilter tests that once a gossip filter
   423  // is applied for the remote peer, then we don't send the peer all known
   424  // messages which are within their desired time horizon.
   425  func TestGossipSyncerApplyNoHistoricalGossipFilter(t *testing.T) {
   426  	t.Parallel()
   427  
   428  	// First, we'll create a GossipSyncer instance with a canned sendToPeer
   429  	// message to allow us to intercept their potential sends.
   430  	_, syncer, chanSeries := newTestSyncer(
   431  		lnwire.NewShortChanIDFromInt(10), defaultEncoding,
   432  		defaultChunkSize,
   433  	)
   434  	syncer.cfg.ignoreHistoricalFilters = true
   435  
   436  	// We'll apply this gossip horizon for the remote peer.
   437  	remoteHorizon := &lnwire.GossipTimestampRange{
   438  		FirstTimestamp: unixStamp(25000),
   439  		TimestampRange: uint32(1000),
   440  	}
   441  
   442  	// After applying the gossip filter, the chan series should not be
   443  	// queried using the updated horizon.
   444  	errChan := make(chan error, 1)
   445  	var wg sync.WaitGroup
   446  	wg.Add(1)
   447  	go func() {
   448  		defer wg.Done()
   449  
   450  		select {
   451  		// No query received, success.
   452  		case <-time.After(3 * time.Second):
   453  			errChan <- nil
   454  
   455  		// Unexpected query received.
   456  		case <-chanSeries.horizonReq:
   457  			errChan <- errors.New("chan series should not have been " +
   458  				"queried")
   459  		}
   460  	}()
   461  
   462  	// We'll now attempt to apply the gossip filter for the remote peer.
   463  	syncer.ApplyGossipFilter(remoteHorizon)
   464  
   465  	// Ensure that the syncer's remote horizon was properly updated.
   466  	if !reflect.DeepEqual(syncer.remoteUpdateHorizon, remoteHorizon) {
   467  		t.Fatalf("expected remote horizon: %v, got: %v",
   468  			remoteHorizon, syncer.remoteUpdateHorizon)
   469  	}
   470  
   471  	// Wait for the query check to finish.
   472  	wg.Wait()
   473  
   474  	// Assert that no query was made as a result of applying the gossip
   475  	// filter.
   476  	err := <-errChan
   477  	if err != nil {
    478  		t.Fatal(err)
   479  	}
   480  }
   481  
   482  // TestGossipSyncerApplyGossipFilter tests that once a gossip filter is applied
   483  // for the remote peer, then we send the peer all known messages which are
   484  // within their desired time horizon.
   485  func TestGossipSyncerApplyGossipFilter(t *testing.T) {
   486  	t.Parallel()
   487  
   488  	// First, we'll create a GossipSyncer instance with a canned sendToPeer
   489  	// message to allow us to intercept their potential sends.
   490  	msgChan, syncer, chanSeries := newTestSyncer(
   491  		lnwire.NewShortChanIDFromInt(10), defaultEncoding,
   492  		defaultChunkSize,
   493  	)
   494  
   495  	// We'll apply this gossip horizon for the remote peer.
   496  	remoteHorizon := &lnwire.GossipTimestampRange{
   497  		FirstTimestamp: unixStamp(25000),
   498  		TimestampRange: uint32(1000),
   499  	}
   500  
   501  	// Before we apply the horizon, we'll dispatch a response to the query
   502  	// that the syncer will issue.
   503  	errCh := make(chan error, 1)
   504  	go func() {
   505  		select {
   506  		case <-time.After(time.Second * 15):
   507  			errCh <- errors.New("no query recvd")
   508  			return
   509  		case query := <-chanSeries.horizonReq:
   510  			// The syncer should have translated the time range
    511  			// into the proper start time.
   512  			if remoteHorizon.FirstTimestamp != uint32(query.start.Unix()) {
   513  				errCh <- fmt.Errorf("wrong query stamp: expected %v, got %v",
   514  					remoteHorizon.FirstTimestamp, query.start)
   515  				return
   516  			}
   517  
   518  			// For this first response, we'll send back an empty
    519  			// set of messages. As a result, we shouldn't send any
   520  			// messages.
   521  			chanSeries.horizonResp <- []lnwire.Message{}
   522  			errCh <- nil
   523  		}
   524  	}()
   525  
   526  	// We'll now attempt to apply the gossip filter for the remote peer.
   527  	err := syncer.ApplyGossipFilter(remoteHorizon)
   528  	if err != nil {
   529  		t.Fatalf("unable to apply filter: %v", err)
   530  	}
   531  
   532  	// There should be no messages in the message queue as we didn't send
    533  	// the syncer any messages within the horizon.
   534  	select {
   535  	case msgs := <-msgChan:
   536  		t.Fatalf("expected no msgs, instead got %v", spew.Sdump(msgs))
   537  	default:
   538  	}
   539  
   540  	// Wait for error result from goroutine.
   541  	select {
   542  	case <-time.After(time.Second * 30):
   543  		t.Fatalf("goroutine did not return within 30 seconds")
   544  	case err := <-errCh:
   545  		if err != nil {
   546  			t.Fatal(err)
   547  		}
   548  	}
   549  
   550  	// If we repeat the process, but give the syncer a set of valid
   551  	// messages, then these should be sent to the remote peer.
   552  	go func() {
   553  		select {
   554  		case <-time.After(time.Second * 15):
   555  			errCh <- errors.New("no query recvd")
   556  			return
   557  		case query := <-chanSeries.horizonReq:
   558  			// The syncer should have translated the time range
    559  			// into the proper start time.
   560  			if remoteHorizon.FirstTimestamp != uint32(query.start.Unix()) {
   561  				errCh <- fmt.Errorf("wrong query stamp: expected %v, got %v",
   562  					remoteHorizon.FirstTimestamp, query.start)
   563  				return
   564  			}
   565  
    566  			// For this second response, we'll send back a proper
   567  			// set of messages that should be echoed back.
   568  			chanSeries.horizonResp <- []lnwire.Message{
   569  				&lnwire.ChannelUpdate{
   570  					ShortChannelID: lnwire.NewShortChanIDFromInt(25),
   571  					Timestamp:      unixStamp(5),
   572  				},
   573  			}
   574  			errCh <- nil
   575  		}
   576  	}()
   577  	err = syncer.ApplyGossipFilter(remoteHorizon)
   578  	if err != nil {
   579  		t.Fatalf("unable to apply filter: %v", err)
   580  	}
   581  
   582  	// We should get back the exact same message.
   583  	select {
   584  	case <-time.After(time.Second * 15):
   585  		t.Fatalf("no msgs received")
   586  
   587  	case msgs := <-msgChan:
   588  		if len(msgs) != 1 {
   589  			t.Fatalf("wrong messages: expected %v, got %v",
   590  				1, len(msgs))
   591  		}
   592  	}
   593  
   594  	// Wait for error result from goroutine.
   595  	select {
   596  	case <-time.After(time.Second * 30):
   597  		t.Fatalf("goroutine did not return within 30 seconds")
   598  	case err := <-errCh:
   599  		if err != nil {
   600  			t.Fatal(err)
   601  		}
   602  	}
   603  }
   604  
   605  // TestGossipSyncerQueryChannelRangeWrongChainHash tests that if we receive a
   606  // channel range query for the wrong chain, then we send back a response with no
   607  // channels and complete=0.
   608  func TestGossipSyncerQueryChannelRangeWrongChainHash(t *testing.T) {
   609  	t.Parallel()
   610  
   611  	// First, we'll create a GossipSyncer instance with a canned sendToPeer
   612  	// message to allow us to intercept their potential sends.
   613  	msgChan, syncer, _ := newTestSyncer(
   614  		lnwire.NewShortChanIDFromInt(10), defaultEncoding,
   615  		defaultChunkSize,
   616  	)
   617  
   618  	// We'll now ask the syncer to reply to a channel range query, but for a
   619  	// chain that it isn't aware of.
   620  	query := &lnwire.QueryChannelRange{
   621  		ChainHash:        chaincfg.SimNetParams().GenesisHash,
   622  		FirstBlockHeight: 0,
   623  		NumBlocks:        math.MaxUint32,
   624  	}
   625  	err := syncer.replyChanRangeQuery(query)
   626  	if err != nil {
   627  		t.Fatalf("unable to process short chan ID's: %v", err)
   628  	}
   629  
   630  	select {
   631  	case <-time.After(time.Second * 15):
   632  		t.Fatalf("no msgs received")
   633  
   634  	case msgs := <-msgChan:
   635  		// We should get back exactly one message, that's a
   636  		// ReplyChannelRange with a matching query, and a complete value
   637  		// of zero.
   638  		if len(msgs) != 1 {
   639  			t.Fatalf("wrong messages: expected %v, got %v",
   640  				1, len(msgs))
   641  		}
   642  
   643  		msg, ok := msgs[0].(*lnwire.ReplyChannelRange)
   644  		if !ok {
   645  			t.Fatalf("expected lnwire.ReplyChannelRange, got %T", msg)
   646  		}
   647  
   648  		if msg.ChainHash != query.ChainHash {
   649  			t.Fatalf("wrong chain hash: expected %v got %v",
   650  				query.ChainHash, msg.ChainHash)
   651  		}
   652  		if msg.Complete != 0 {
   653  			t.Fatalf("expected complete set to 0, got %v",
   654  				msg.Complete)
   655  		}
   656  	}
   657  }
   658  
    659  // TestGossipSyncerReplyShortChanIDsWrongChainHash tests that if we get a chan
    660  // ID query for the wrong chain, then we send back only a ReplyShortChanIDsEnd
    661  // message with complete=0.
   662  func TestGossipSyncerReplyShortChanIDsWrongChainHash(t *testing.T) {
   663  	t.Parallel()
   664  
   665  	// First, we'll create a GossipSyncer instance with a canned sendToPeer
   666  	// message to allow us to intercept their potential sends.
   667  	msgChan, syncer, _ := newTestSyncer(
   668  		lnwire.NewShortChanIDFromInt(10), defaultEncoding,
   669  		defaultChunkSize,
   670  	)
   671  
   672  	// We'll now ask the syncer to reply to a chan ID query, but for a
   673  	// chain that it isn't aware of.
   674  	err := syncer.replyShortChanIDs(&lnwire.QueryShortChanIDs{
   675  		ChainHash: chaincfg.SimNetParams().GenesisHash,
   676  	})
   677  	if err != nil {
   678  		t.Fatalf("unable to process short chan ID's: %v", err)
   679  	}
   680  
   681  	select {
   682  	case <-time.After(time.Second * 15):
   683  		t.Fatalf("no msgs received")
   684  	case msgs := <-msgChan:
   685  
   686  		// We should get back exactly one message, that's a
   687  		// ReplyShortChanIDsEnd with a matching chain hash, and a
   688  		// complete value of zero.
   689  		if len(msgs) != 1 {
   690  			t.Fatalf("wrong messages: expected %v, got %v",
   691  				1, len(msgs))
   692  		}
   693  
   694  		msg, ok := msgs[0].(*lnwire.ReplyShortChanIDsEnd)
   695  		if !ok {
   696  			t.Fatalf("expected lnwire.ReplyShortChanIDsEnd "+
   697  				"instead got %T", msg)
   698  		}
   699  
   700  		simnetParams := chaincfg.SimNetParams()
   701  		if msg.ChainHash != simnetParams.GenesisHash {
   702  			t.Fatalf("wrong chain hash: expected %v, got %v",
    703  				simnetParams.GenesisHash, msg.ChainHash)
   704  		}
   705  		if msg.Complete != 0 {
   706  			t.Fatalf("complete set incorrectly")
   707  		}
   708  	}
   709  }
   710  
   711  // TestGossipSyncerReplyShortChanIDs tests that in the case of a known chain
   712  // hash for a QueryShortChanIDs, we'll return the set of matching
   713  // announcements, as well as an ending ReplyShortChanIDsEnd message.
   714  func TestGossipSyncerReplyShortChanIDs(t *testing.T) {
   715  	t.Parallel()
   716  
   717  	// First, we'll create a GossipSyncer instance with a canned sendToPeer
   718  	// message to allow us to intercept their potential sends.
   719  	msgChan, syncer, chanSeries := newTestSyncer(
   720  		lnwire.NewShortChanIDFromInt(10), defaultEncoding,
   721  		defaultChunkSize,
   722  	)
   723  
   724  	queryChanIDs := []lnwire.ShortChannelID{
   725  		lnwire.NewShortChanIDFromInt(1),
   726  		lnwire.NewShortChanIDFromInt(2),
   727  		lnwire.NewShortChanIDFromInt(3),
   728  	}
   729  
   730  	queryReply := []lnwire.Message{
   731  		&lnwire.ChannelAnnouncement{
   732  			ShortChannelID: lnwire.NewShortChanIDFromInt(20),
   733  		},
   734  		&lnwire.ChannelUpdate{
   735  			ShortChannelID: lnwire.NewShortChanIDFromInt(20),
   736  			Timestamp:      unixStamp(999999),
   737  		},
   738  		&lnwire.NodeAnnouncement{Timestamp: unixStamp(25001)},
   739  	}
   740  
   741  	// We'll then craft a reply to the upcoming query for all the matching
   742  	// channel announcements for a particular set of short channel ID's.
   743  	errCh := make(chan error, 1)
   744  	go func() {
   745  		select {
   746  		case <-time.After(time.Second * 15):
   747  			errCh <- errors.New("no query recvd")
   748  			return
   749  		case chanIDs := <-chanSeries.annReq:
   750  			// The set of chan ID's should match exactly.
   751  			if !reflect.DeepEqual(chanIDs, queryChanIDs) {
   752  				errCh <- fmt.Errorf("wrong chan IDs: expected %v, got %v",
   753  					queryChanIDs, chanIDs)
   754  				return
   755  			}
   756  
   757  			// If they do, then we'll send back a response with
   758  			// some canned messages.
   759  			chanSeries.annResp <- queryReply
   760  			errCh <- nil
   761  		}
   762  	}()
   763  
   764  	// With our set up above complete, we'll now attempt to obtain a reply
   765  	// from the channel syncer for our target chan ID query.
   766  	err := syncer.replyShortChanIDs(&lnwire.QueryShortChanIDs{
   767  		ShortChanIDs: queryChanIDs,
   768  	})
   769  	if err != nil {
   770  		t.Fatalf("unable to query for chan IDs: %v", err)
   771  	}
   772  
   773  	for i := 0; i < len(queryReply)+1; i++ {
   774  		select {
   775  		case <-time.After(time.Second * 15):
   776  			t.Fatalf("no msgs received")
   777  
   778  		// We should get back exactly 4 messages. The first 3 are the
    779  		// same messages we sent above, followed by the query end message.
   780  		case msgs := <-msgChan:
   781  			if len(msgs) != 1 {
   782  				t.Fatalf("wrong number of messages: "+
   783  					"expected %v, got %v", 1, len(msgs))
   784  			}
   785  
   786  			isQueryReply := i < len(queryReply)
   787  			finalMsg, ok := msgs[0].(*lnwire.ReplyShortChanIDsEnd)
   788  
   789  			switch {
   790  			case isQueryReply &&
   791  				!reflect.DeepEqual(queryReply[i], msgs[0]):
   792  
   793  				t.Fatalf("wrong message: expected %v, got %v",
   794  					spew.Sdump(queryReply[i]),
   795  					spew.Sdump(msgs[0]))
   796  
   797  			case !isQueryReply && !ok:
   798  				t.Fatalf("expected lnwire.ReplyShortChanIDsEnd"+
   799  					" instead got %T", msgs[3])
   800  
   801  			case !isQueryReply && finalMsg.Complete != 1:
   802  				t.Fatalf("complete wasn't set")
   803  			}
   804  		}
   805  	}
   806  
   807  	// Wait for error from goroutine.
   808  	select {
   809  	case <-time.After(time.Second * 30):
   810  		t.Fatalf("goroutine did not return within 30 seconds")
   811  	case err := <-errCh:
   812  		if err != nil {
   813  			t.Fatal(err)
   814  		}
   815  	}
   816  }
   817  
   818  // TestGossipSyncerReplyChanRangeQuery tests that if we receive a
   819  // QueryChannelRange message, then we'll properly send back a chunked reply to
   820  // the remote peer.
   821  func TestGossipSyncerReplyChanRangeQuery(t *testing.T) {
   822  	t.Parallel()
   823  
   824  	// We'll use a smaller chunk size so we can easily test all the edge
   825  	// cases.
   826  	const chunkSize = 2
   827  
   828  	// We'll now create our test gossip syncer that will shortly respond to
   829  	// our canned query.
   830  	msgChan, syncer, chanSeries := newTestSyncer(
   831  		lnwire.NewShortChanIDFromInt(10), defaultEncoding, chunkSize,
   832  	)
   833  
   834  	// Next, we'll craft a query to ask for all the new chan ID's after
   835  	// block 100.
   836  	const startingBlockHeight = 100
   837  	const numBlocks = 50
   838  	const endingBlockHeight = startingBlockHeight + numBlocks - 1
   839  	query := &lnwire.QueryChannelRange{
   840  		FirstBlockHeight: uint32(startingBlockHeight),
   841  		NumBlocks:        uint32(numBlocks),
   842  	}
   843  
   844  	// We'll then launch a goroutine to reply to the query with a set of 5
   845  	// responses. This will ensure we get two full chunks, and one partial
   846  	// chunk.
   847  	queryResp := []lnwire.ShortChannelID{
   848  		{
   849  			BlockHeight: uint32(startingBlockHeight),
   850  		},
   851  		{
   852  			BlockHeight: 102,
   853  		},
   854  		{
   855  			BlockHeight: 104,
   856  		},
   857  		{
   858  			BlockHeight: 106,
   859  		},
   860  		{
   861  			BlockHeight: 108,
   862  		},
   863  	}
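        	// With chunkSize = 2, the syncer should split these five IDs into
        	// the chunks [100, 102], [104, 106], and a final partial chunk of
        	// just [108].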
   864  
   865  	errCh := make(chan error, 1)
   866  	go func() {
   867  		select {
   868  		case <-time.After(time.Second * 15):
   869  			errCh <- errors.New("no query recvd")
   870  			return
   871  		case filterReq := <-chanSeries.filterRangeReqs:
   872  			// We should be querying for block 100 to 150.
   873  			if filterReq.startHeight != startingBlockHeight &&
   874  				filterReq.endHeight != endingBlockHeight {
   875  
   876  				errCh <- fmt.Errorf("wrong height range: %v",
   877  					spew.Sdump(filterReq))
   878  				return
   879  			}
   880  
   881  			// If the proper request was sent, then we'll respond
   882  			// with our set of short channel ID's.
   883  			chanSeries.filterRangeResp <- queryResp
   884  			errCh <- nil
   885  		}
   886  	}()
   887  
   888  	// With our goroutine active, we'll now issue the query.
   889  	if err := syncer.replyChanRangeQuery(query); err != nil {
   890  		t.Fatalf("unable to issue query: %v", err)
   891  	}
   892  
   893  	// At this point, we'll now wait for the syncer to send the chunked
   894  	// reply. We should get three sets of messages as two of them should be
   895  	// full, while the other is the final fragment.
   896  	const numExpectedChunks = 3
   897  	var prevResp *lnwire.ReplyChannelRange
   898  	respMsgs := make([]lnwire.ShortChannelID, 0, 5)
   899  	for i := 0; i < numExpectedChunks; i++ {
   900  		select {
   901  		case <-time.After(time.Second * 15):
   902  			t.Fatalf("no msgs received")
   903  
   904  		case msg := <-msgChan:
   905  			resp := msg[0]
   906  			rangeResp, ok := resp.(*lnwire.ReplyChannelRange)
   907  			if !ok {
   908  				t.Fatalf("expected ReplyChannelRange instead got %T", msg)
   909  			}
   910  
   911  			// We'll determine the correct values of each field in
   912  			// each response based on the order that they were sent.
   913  			var (
   914  				expectedFirstBlockHeight uint32
   915  				expectedNumBlocks        uint32
   916  				expectedComplete         uint8
   917  			)
   918  
   919  			switch {
   920  			// The first reply should range from our starting block
   921  			// height until it reaches its maximum capacity of
   922  			// channels.
   923  			case i == 0:
   924  				expectedFirstBlockHeight = startingBlockHeight
   925  				expectedNumBlocks = 4
   926  
   927  			// The last reply should range starting from the next
   928  			// block of our previous reply up until the ending
   929  			// height of the query. It should also have the Complete
   930  			// bit set.
   931  			case i == numExpectedChunks-1:
   932  				expectedFirstBlockHeight = prevResp.LastBlockHeight() + 1
   933  				expectedNumBlocks = endingBlockHeight - expectedFirstBlockHeight + 1
   934  				expectedComplete = 1
   935  
   936  			// Any intermediate replies should range starting from
   937  			// the next block of our previous reply up until it
   938  			// reaches its maximum capacity of channels.
   939  			default:
   940  				expectedFirstBlockHeight = prevResp.LastBlockHeight() + 1
   941  				expectedNumBlocks = 4
   942  			}
   943  
   944  			switch {
   945  			case rangeResp.FirstBlockHeight != expectedFirstBlockHeight:
   946  				t.Fatalf("FirstBlockHeight in resp #%d "+
   947  					"incorrect: expected %v, got %v", i+1,
   948  					expectedFirstBlockHeight,
   949  					rangeResp.FirstBlockHeight)
   950  
   951  			case rangeResp.NumBlocks != expectedNumBlocks:
   952  				t.Fatalf("NumBlocks in resp #%d incorrect: "+
   953  					"expected %v, got %v", i+1,
   954  					expectedNumBlocks, rangeResp.NumBlocks)
   955  
   956  			case rangeResp.Complete != expectedComplete:
   957  				t.Fatalf("Complete in resp #%d incorrect: "+
   958  					"expected %v, got %v", i+1,
   959  					expectedComplete, rangeResp.Complete)
   960  			}
   961  
   962  			prevResp = rangeResp
   963  			respMsgs = append(respMsgs, rangeResp.ShortChanIDs...)
   964  		}
   965  	}
   966  
   967  	// We should get back exactly 5 short chan ID's, and they should match
   968  	// exactly the ID's we sent as a reply.
   969  	if len(respMsgs) != len(queryResp) {
   970  		t.Fatalf("expected %v chan ID's, instead got %v",
   971  			len(queryResp), spew.Sdump(respMsgs))
   972  	}
   973  	if !reflect.DeepEqual(queryResp, respMsgs) {
   974  		t.Fatalf("mismatched response: expected %v, got %v",
   975  			spew.Sdump(queryResp), spew.Sdump(respMsgs))
   976  	}
   977  
   978  	// Wait for error from goroutine.
   979  	select {
   980  	case <-time.After(time.Second * 30):
   981  		t.Fatalf("goroutine did not return within 30 seconds")
   982  	case err := <-errCh:
   983  		if err != nil {
   984  			t.Fatal(err)
   985  		}
   986  	}
   987  }
   988  
    989  // TestGossipSyncerReplyChanRangeQueryBlockRange tests a variety of
    990  // QueryChannelRange messages to ensure the underlying queries are
    991  // executed with the correct block range.
   992  func TestGossipSyncerReplyChanRangeQueryBlockRange(t *testing.T) {
   993  	t.Parallel()
   994  
   995  	// First create our test gossip syncer that will handle and
   996  	// respond to the test queries
   997  	_, syncer, chanSeries := newTestSyncer(
   998  		lnwire.NewShortChanIDFromInt(10), defaultEncoding, math.MaxInt32,
   999  	)
  1000  
  1001  	// Next construct test queries with various startBlock and endBlock
  1002  	// ranges
  1003  	queryReqs := []*lnwire.QueryChannelRange{
  1004  		// full range example
  1005  		{
  1006  			FirstBlockHeight: uint32(0),
  1007  			NumBlocks:        uint32(math.MaxUint32),
  1008  		},
  1009  
  1010  		// small query example that does not overflow
  1011  		{
  1012  			FirstBlockHeight: uint32(1000),
  1013  			NumBlocks:        uint32(100),
  1014  		},
  1015  
  1016  		// overflow example
  1017  		{
  1018  			FirstBlockHeight: uint32(1000),
  1019  			NumBlocks:        uint32(math.MaxUint32),
  1020  		},
  1021  	}
  1022  
  1023  	// Next construct the expected filterRangeReq startHeight and endHeight
  1024  	// values that we will compare to the captured values
  1025  	expFilterReqs := []filterRangeReq{
  1026  		{
  1027  			startHeight: uint32(0),
  1028  			endHeight:   uint32(math.MaxUint32 - 1),
  1029  		},
  1030  		{
  1031  			startHeight: uint32(1000),
  1032  			endHeight:   uint32(1099),
  1033  		},
  1034  		{
  1035  			startHeight: uint32(1000),
  1036  			endHeight:   uint32(math.MaxUint32),
  1037  		},
  1038  	}
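        	// Note that in each case the expected endHeight works out to
        	// FirstBlockHeight + NumBlocks - 1, clamped to MaxUint32 when that
        	// sum would overflow a uint32 (the third, overflow, example).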
  1039  
  1040  	// We'll then launch a goroutine to capture the filterRangeReqs for
  1041  	// each request and return those results once all queries have been
  1042  	// received
  1043  	resultsCh := make(chan []filterRangeReq, 1)
  1044  	errCh := make(chan error, 1)
  1045  	go func() {
  1046  		// We will capture the values supplied to the chanSeries here
  1047  		// and return the results once all the requests have been
  1048  		// collected
  1049  		capFilterReqs := []filterRangeReq{}
  1050  
  1051  		for filterReq := range chanSeries.filterRangeReqs {
  1052  			// capture the filter request so we can compare to the
  1053  			// expected values later
  1054  			capFilterReqs = append(capFilterReqs, filterReq)
  1055  
   1056  			// Reply with an empty result for each query to
   1057  			// unblock the caller.
  1058  			queryResp := []lnwire.ShortChannelID{}
  1059  			chanSeries.filterRangeResp <- queryResp
  1060  
  1061  			// Once we have collected all results send the results
  1062  			// back to the main thread and terminate the goroutine
  1063  			if len(capFilterReqs) == len(expFilterReqs) {
  1064  				resultsCh <- capFilterReqs
  1065  				return
  1066  			}
  1067  
  1068  		}
  1069  	}()
  1070  
   1071  	// We'll launch a goroutine to send the queries sequentially. Running
   1072  	// them asynchronously ensures that the timeout logic below on the
   1073  	// main thread can be reached.
  1074  	go func() {
  1075  		for _, query := range queryReqs {
  1076  			if err := syncer.replyChanRangeQuery(query); err != nil {
  1077  				errCh <- fmt.Errorf("unable to issue query: %v", err)
  1078  				return
  1079  			}
  1080  		}
  1081  	}()
  1082  
   1083  	// Wait either for the results to be collected (validating that they
   1084  	// match the expected results), for the timeout to expire, or for an
   1085  	// error to occur.
  1086  	select {
  1087  	case capFilterReq := <-resultsCh:
  1088  		if !reflect.DeepEqual(expFilterReqs, capFilterReq) {
  1089  			t.Fatalf("mismatched filter reqs: expected %v, got %v",
  1090  				spew.Sdump(expFilterReqs), spew.Sdump(capFilterReq))
  1091  		}
  1092  	case <-time.After(time.Second * 10):
  1093  		t.Fatalf("goroutine did not return within 10 seconds")
  1094  	case err := <-errCh:
  1095  		if err != nil {
  1096  			t.Fatal(err)
  1097  		}
  1098  	}
  1099  }
  1100  
  1101  // TestGossipSyncerReplyChanRangeQueryNoNewChans tests that if we issue a reply
  1102  // for a channel range query, and we don't have any new channels, then we send
  1103  // back a single response that signals completion.
  1104  func TestGossipSyncerReplyChanRangeQueryNoNewChans(t *testing.T) {
  1105  	t.Parallel()
  1106  
  1107  	// We'll now create our test gossip syncer that will shortly respond to
  1108  	// our canned query.
  1109  	msgChan, syncer, chanSeries := newTestSyncer(
  1110  		lnwire.NewShortChanIDFromInt(10), defaultEncoding,
  1111  		defaultChunkSize,
  1112  	)
  1113  
  1114  	// Next, we'll craft a query to ask for all the new chan ID's after
  1115  	// block 100.
  1116  	query := &lnwire.QueryChannelRange{
  1117  		FirstBlockHeight: 100,
  1118  		NumBlocks:        50,
  1119  	}
  1120  
   1121  	// We'll then launch a goroutine to reply to the query with no new channels.
  1122  	resp := []lnwire.ShortChannelID{}
  1123  	errCh := make(chan error, 1)
  1124  	go func() {
  1125  		select {
  1126  		case <-time.After(time.Second * 15):
  1127  			errCh <- errors.New("no query recvd")
  1128  			return
  1129  		case filterReq := <-chanSeries.filterRangeReqs:
  1130  			// We should be querying for block 100 to 150.
  1131  			if filterReq.startHeight != 100 && filterReq.endHeight != 150 {
  1132  				errCh <- fmt.Errorf("wrong height range: %v",
  1133  					spew.Sdump(filterReq))
  1134  				return
  1135  			}
  1136  			// If the proper request was sent, then we'll respond
  1137  			// with our blank set of short chan ID's.
  1138  			chanSeries.filterRangeResp <- resp
  1139  			errCh <- nil
  1140  		}
  1141  	}()
  1142  
  1143  	// With our goroutine active, we'll now issue the query.
  1144  	if err := syncer.replyChanRangeQuery(query); err != nil {
  1145  		t.Fatalf("unable to issue query: %v", err)
  1146  	}
  1147  
  1148  	// We should get back exactly one message, and the message should
  1149  	// indicate that this is the final in the series.
  1150  	select {
  1151  	case <-time.After(time.Second * 15):
  1152  		t.Fatalf("no msgs received")
  1153  
  1154  	case msg := <-msgChan:
  1155  		resp := msg[0]
  1156  		rangeResp, ok := resp.(*lnwire.ReplyChannelRange)
  1157  		if !ok {
  1158  			t.Fatalf("expected ReplyChannelRange instead got %T", msg)
  1159  		}
  1160  
  1161  		if len(rangeResp.ShortChanIDs) != 0 {
  1162  			t.Fatalf("expected no chan ID's, instead "+
  1163  				"got: %v", spew.Sdump(rangeResp.ShortChanIDs))
  1164  		}
  1165  		if rangeResp.Complete != 1 {
  1166  			t.Fatalf("complete wasn't set")
  1167  		}
  1168  	}
  1169  
  1170  	// Wait for error from goroutine.
  1171  	select {
  1172  	case <-time.After(time.Second * 30):
  1173  		t.Fatalf("goroutine did not return within 30 seconds")
  1174  	case err := <-errCh:
  1175  		if err != nil {
  1176  			t.Fatal(err)
  1177  		}
  1178  	}
  1179  }
  1180  
   1181  // TestGossipSyncerGenChanRangeQuery tests that given the current best known
   1182  // channel ID, we properly generate a correct initial channel range query.
  1183  func TestGossipSyncerGenChanRangeQuery(t *testing.T) {
  1184  	t.Parallel()
  1185  
  1186  	// First, we'll create a GossipSyncer instance with a canned sendToPeer
  1187  	// message to allow us to intercept their potential sends.
  1188  	const startingHeight = 200
  1189  	_, syncer, _ := newTestSyncer(
  1190  		lnwire.ShortChannelID{BlockHeight: startingHeight},
  1191  		defaultEncoding, defaultChunkSize,
  1192  	)
  1193  
  1194  	// If we now ask the syncer to generate an initial range query, it
  1195  	// should return a start height that's back chanRangeQueryBuffer
  1196  	// blocks.
  1197  	rangeQuery, err := syncer.genChanRangeQuery(false)
  1198  	if err != nil {
  1199  		t.Fatalf("unable to resp: %v", err)
  1200  	}
  1201  
  1202  	firstHeight := uint32(startingHeight - chanRangeQueryBuffer)
  1203  	if rangeQuery.FirstBlockHeight != firstHeight {
  1204  		t.Fatalf("incorrect chan range query: expected %v, %v",
  1205  			rangeQuery.FirstBlockHeight,
  1206  			startingHeight-chanRangeQueryBuffer)
  1207  	}
  1208  	if rangeQuery.NumBlocks != latestKnownHeight-firstHeight {
  1209  		t.Fatalf("wrong num blocks: expected %v, got %v",
  1210  			latestKnownHeight-firstHeight, rangeQuery.NumBlocks)
  1211  	}
  1212  
  1213  	// Generating a historical range query should result in a start height
  1214  	// of 0.
  1215  	rangeQuery, err = syncer.genChanRangeQuery(true)
  1216  	if err != nil {
  1217  		t.Fatalf("unable to resp: %v", err)
  1218  	}
  1219  	if rangeQuery.FirstBlockHeight != 0 {
  1220  		t.Fatalf("incorrect chan range query: expected %v, %v", 0,
  1221  			rangeQuery.FirstBlockHeight)
  1222  	}
  1223  	if rangeQuery.NumBlocks != latestKnownHeight {
  1224  		t.Fatalf("wrong num blocks: expected %v, got %v",
  1225  			latestKnownHeight, rangeQuery.NumBlocks)
  1226  	}
  1227  }
  1228  
   1229  // TestGossipSyncerProcessChanRangeReply tests that we'll properly buffer
   1230  // channel range replies until we've received the complete set.
  1231  func TestGossipSyncerProcessChanRangeReply(t *testing.T) {
  1232  	t.Parallel()
  1233  
  1234  	t.Run("legacy", func(t *testing.T) {
  1235  		testGossipSyncerProcessChanRangeReply(t, true)
  1236  	})
  1237  	t.Run("block ranges", func(t *testing.T) {
  1238  		testGossipSyncerProcessChanRangeReply(t, false)
  1239  	})
  1240  }
  1241  
   1242  // testGossipSyncerProcessChanRangeReply tests that we'll properly buffer
   1243  // channel range replies until we've received the complete set. The legacy
  1244  // option, if set, uses the Complete field of the reply to determine when we've
  1245  // received all expected replies. Otherwise, it looks at the block ranges of
  1246  // each reply instead.
  1247  func testGossipSyncerProcessChanRangeReply(t *testing.T, legacy bool) {
  1248  	t.Parallel()
  1249  
  1250  	// First, we'll create a GossipSyncer instance with a canned sendToPeer
  1251  	// message to allow us to intercept their potential sends.
  1252  	highestID := lnwire.ShortChannelID{
  1253  		BlockHeight: latestKnownHeight,
  1254  	}
  1255  	_, syncer, chanSeries := newTestSyncer(
  1256  		highestID, defaultEncoding, defaultChunkSize,
  1257  	)
  1258  
  1259  	startingState := syncer.state
  1260  
  1261  	query, err := syncer.genChanRangeQuery(true)
  1262  	if err != nil {
  1263  		t.Fatalf("unable to generate channel range query: %v", err)
  1264  	}
  1265  
  1266  	// When interpreting block ranges, the first reply should start from
  1267  	// our requested first block, and the last should end at our requested
  1268  	// last block.
  1269  	replies := []*lnwire.ReplyChannelRange{
  1270  		{
  1271  			FirstBlockHeight: 0,
  1272  			NumBlocks:        11,
  1273  			ShortChanIDs: []lnwire.ShortChannelID{
  1274  				{
  1275  					BlockHeight: 10,
  1276  				},
  1277  			},
  1278  		},
  1279  		{
  1280  			FirstBlockHeight: 11,
  1281  			NumBlocks:        1,
  1282  			ShortChanIDs: []lnwire.ShortChannelID{
  1283  				{
  1284  					BlockHeight: 11,
  1285  				},
  1286  			},
  1287  		},
  1288  		{
  1289  			FirstBlockHeight: 12,
  1290  			NumBlocks:        query.NumBlocks - 12,
  1291  			Complete:         1,
  1292  			ShortChanIDs: []lnwire.ShortChannelID{
  1293  				{
  1294  					BlockHeight: 12,
  1295  				},
  1296  			},
  1297  		},
  1298  	}
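        	// These replies tile the queried range contiguously: blocks
        	// [0, 11), then [11, 12), and finally [12, query.NumBlocks).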
  1299  
   1300  	// In legacy mode, each reply covers the same block range as the
   1301  	// original query.
  1302  	if legacy {
  1303  		replies[0].FirstBlockHeight = query.FirstBlockHeight
  1304  		replies[0].NumBlocks = query.NumBlocks
  1305  
  1306  		replies[1].FirstBlockHeight = query.FirstBlockHeight
  1307  		replies[1].NumBlocks = query.NumBlocks
  1308  
  1309  		replies[2].FirstBlockHeight = query.FirstBlockHeight
  1310  		replies[2].NumBlocks = query.NumBlocks
  1311  	}
  1312  
  1313  	// We'll begin by sending the syncer a set of non-complete channel
  1314  	// range replies.
  1315  	if err := syncer.processChanRangeReply(replies[0]); err != nil {
  1316  		t.Fatalf("unable to process reply: %v", err)
  1317  	}
  1318  	if err := syncer.processChanRangeReply(replies[1]); err != nil {
  1319  		t.Fatalf("unable to process reply: %v", err)
  1320  	}
  1321  
  1322  	// At this point, we should still be in our starting state as the query
  1323  	// hasn't finished.
  1324  	if syncer.state != startingState {
  1325  		t.Fatalf("state should not have transitioned")
  1326  	}
  1327  
  1328  	expectedReq := []lnwire.ShortChannelID{
  1329  		{
  1330  			BlockHeight: 10,
  1331  		},
  1332  		{
  1333  			BlockHeight: 11,
  1334  		},
  1335  		{
  1336  			BlockHeight: 12,
  1337  		},
  1338  	}
  1339  
  1340  	// As we're about to send the final response, we'll launch a goroutine
  1341  	// to respond back with a filtered set of chan ID's.
  1342  	errCh := make(chan error, 1)
  1343  	go func() {
  1344  		select {
  1345  		case <-time.After(time.Second * 15):
  1346  			errCh <- errors.New("no query received")
  1347  			return
  1348  
  1349  		case req := <-chanSeries.filterReq:
  1350  			// We should get a request for the entire range of short
  1351  			// chan ID's.
  1352  			if !reflect.DeepEqual(expectedReq, req) {
  1353  				errCh <- fmt.Errorf("wrong request: expected %v, got %v",
  1354  					expectedReq, req)
  1355  				return
  1356  			}
  1357  
  1358  			// We'll send back only the last two to simulate filtering.
  1359  			chanSeries.filterResp <- expectedReq[1:]
  1360  			errCh <- nil
  1361  		}
  1362  	}()
  1363  
  1364  	// If we send the final message, then we should transition to
  1365  	// queryNewChannels as we've sent a non-empty set of new channels.
  1366  	if err := syncer.processChanRangeReply(replies[2]); err != nil {
  1367  		t.Fatalf("unable to process reply: %v", err)
  1368  	}
  1369  
  1370  	if syncer.syncState() != queryNewChannels {
  1371  		t.Fatalf("wrong state: expected %v instead got %v",
  1372  			queryNewChannels, syncer.state)
  1373  	}
  1374  	if !reflect.DeepEqual(syncer.newChansToQuery, expectedReq[1:]) {
  1375  		t.Fatalf("wrong set of chans to query: expected %v, got %v",
   1376  			expectedReq[1:], syncer.newChansToQuery)
  1377  	}
  1378  
  1379  	// Wait for error from goroutine.
  1380  	select {
  1381  	case <-time.After(time.Second * 30):
  1382  		t.Fatalf("goroutine did not return within 30 seconds")
  1383  	case err := <-errCh:
  1384  		if err != nil {
  1385  			t.Fatal(err)
  1386  		}
  1387  	}
  1388  }
  1389  
  1390  // TestGossipSyncerSynchronizeChanIDs tests that we properly request chunks of
  1391  // the short chan ID's which were unknown to us. We'll ensure that we request
  1392  // chunk by chunk, and after the last chunk, we return true indicating that we
  1393  // can transition to the synced stage.
  1394  func TestGossipSyncerSynchronizeChanIDs(t *testing.T) {
  1395  	t.Parallel()
  1396  
  1397  	// We'll modify the chunk size to be a smaller value, so we can ensure
  1398  	// our chunk parsing works properly. With this value we should get 3
  1399  	// queries: two full chunks, and one lingering chunk.
  1400  	const chunkSize = 2
  1401  
  1402  	// First, we'll create a GossipSyncer instance with a canned sendToPeer
  1403  	// message to allow us to intercept their potential sends.
  1404  	msgChan, syncer, _ := newTestSyncer(
  1405  		lnwire.NewShortChanIDFromInt(10), defaultEncoding, chunkSize,
  1406  	)
  1407  
  1408  	// Next, we'll construct a set of chan ID's that we should query for,
  1409  	// and set them as newChansToQuery within the state machine.
  1410  	newChanIDs := []lnwire.ShortChannelID{
  1411  		lnwire.NewShortChanIDFromInt(1),
  1412  		lnwire.NewShortChanIDFromInt(2),
  1413  		lnwire.NewShortChanIDFromInt(3),
  1414  		lnwire.NewShortChanIDFromInt(4),
  1415  		lnwire.NewShortChanIDFromInt(5),
  1416  	}
  1417  	syncer.newChansToQuery = newChanIDs
  1418  
  1419  	for i := 0; i < chunkSize*2; i += 2 {
  1420  		// With our set up complete, we'll request a sync of chan ID's.
  1421  		done, err := syncer.synchronizeChanIDs()
  1422  		if err != nil {
  1423  			t.Fatalf("unable to sync chan IDs: %v", err)
  1424  		}
  1425  
  1426  		// At this point, we shouldn't yet be done as only 2 items
  1427  		// should have been queried for.
  1428  		if done {
  1429  			t.Fatalf("syncer shown as done, but shouldn't be!")
  1430  		}
  1431  
  1432  		// We should've received a new message from the syncer.
  1433  		select {
  1434  		case <-time.After(time.Second * 15):
  1435  			t.Fatalf("no msgs received")
  1436  
  1437  		case msg := <-msgChan:
  1438  			queryMsg, ok := msg[0].(*lnwire.QueryShortChanIDs)
  1439  			if !ok {
  1440  				t.Fatalf("expected QueryShortChanIDs instead "+
  1441  					"got %T", msg)
  1442  			}
  1443  
  1444  			// The query message should have queried for the first
  1445  			// two chan ID's, and nothing more.
  1446  			if !reflect.DeepEqual(queryMsg.ShortChanIDs, newChanIDs[i:i+chunkSize]) {
  1447  				t.Fatalf("wrong query: expected %v, got %v",
  1448  					spew.Sdump(newChanIDs[i:i+chunkSize]),
  1449  					queryMsg.ShortChanIDs)
  1450  			}
  1451  		}
  1452  
  1453  		// With the proper message sent out, the internal state of the
  1454  		// syncer should reflect that it still has more channels to
  1455  		// query for.
  1456  		if !reflect.DeepEqual(syncer.newChansToQuery, newChanIDs[i+chunkSize:]) {
  1457  			t.Fatalf("incorrect chans to query for: expected %v, got %v",
  1458  				spew.Sdump(newChanIDs[i+chunkSize:]),
  1459  				syncer.newChansToQuery)
  1460  		}
  1461  	}
  1462  
  1463  	// At this point, only one more channel should be lingering for the
  1464  	// syncer to query for.
  1465  	if !reflect.DeepEqual(newChanIDs[chunkSize*2:], syncer.newChansToQuery) {
  1466  		t.Fatalf("wrong chans to query: expected %v, got %v",
  1467  			newChanIDs[chunkSize*2:], syncer.newChansToQuery)
  1468  	}
  1469  
   1470  	// If we issue another query, the final lingering query should be sent.
  1471  	done, err := syncer.synchronizeChanIDs()
  1472  	if err != nil {
  1473  		t.Fatalf("unable to sync chan IDs: %v", err)
  1474  	}
  1475  	if done {
  1476  		t.Fatalf("syncer should be finished!")
  1477  	}
  1478  
  1479  	select {
  1480  	case <-time.After(time.Second * 15):
  1481  		t.Fatalf("no msgs received")
  1482  
  1483  	case msg := <-msgChan:
  1484  		queryMsg, ok := msg[0].(*lnwire.QueryShortChanIDs)
  1485  		if !ok {
  1486  			t.Fatalf("expected QueryShortChanIDs instead "+
  1487  				"got %T", msg)
  1488  		}
  1489  
  1490  		// The query issued should simply be the last item.
  1491  		if !reflect.DeepEqual(queryMsg.ShortChanIDs, newChanIDs[chunkSize*2:]) {
  1492  			t.Fatalf("wrong query: expected %v, got %v",
  1493  				spew.Sdump(newChanIDs[chunkSize*2:]),
  1494  				queryMsg.ShortChanIDs)
  1495  		}
  1496  
  1497  		// There also should be no more channels to query.
  1498  		if len(syncer.newChansToQuery) != 0 {
  1499  			t.Fatalf("should be no more chans to query for, "+
  1500  				"instead have %v",
  1501  				spew.Sdump(syncer.newChansToQuery))
  1502  		}
  1503  	}
  1504  }
  1505  
  1506  // TestGossipSyncerDelayDOS tests that the gossip syncer will begin delaying
  1507  // queries after its prescribed allotment of undelayed query responses. Once
  1508  // this happens, all query replies should be delayed by the configurated
  1509  // interval.
  1510  func TestGossipSyncerDelayDOS(t *testing.T) {
  1511  	t.Parallel()
  1512  
  1513  	// We'll modify the chunk size to be a smaller value, since we'll be
  1514  	// sending a modest number of queries. After exhausting our undelayed
  1515  	// gossip queries, we'll send two extra queries and ensure that they are
  1516  	// delayed properly.
  1517  	const chunkSize = 2
  1518  	const numDelayedQueries = 2
  1519  	const delayTolerance = time.Millisecond * 200
  1520  
  1521  	// First, we'll create two GossipSyncer instances with a canned
  1522  	// sendToPeer message to allow us to intercept their potential sends.
  1523  	highestID := lnwire.ShortChannelID{
  1524  		BlockHeight: 1144,
  1525  	}
  1526  	msgChan1, syncer1, chanSeries1 := newTestSyncer(
  1527  		highestID, defaultEncoding, chunkSize, true, false,
  1528  	)
  1529  	syncer1.Start()
  1530  	defer syncer1.Stop()
  1531  
  1532  	msgChan2, syncer2, chanSeries2 := newTestSyncer(
  1533  		highestID, defaultEncoding, chunkSize, false, true,
  1534  	)
  1535  	syncer2.Start()
  1536  	defer syncer2.Stop()
  1537  
  1538  	// Record the delayed query reply interval used by each syncer.
  1539  	delayedQueryInterval := syncer1.cfg.delayedQueryReplyInterval
  1540  
  1541  	// Record the number of undelayed queries allowed by the syncers.
  1542  	numUndelayedQueries := syncer1.cfg.maxUndelayedQueryReplies
  1543  
  1544  	// We will send enough queries to exhaust the undelayed responses, and
  1545  	// then send two more queries which should be delayed. One is
  1546  	// subtracted from the total since an undelayed message will be
  1547  	// consumed by the initial QueryChannelRange.
  1548  	numQueryResponses := numUndelayedQueries + numDelayedQueries - 1
  1549  
  1550  	// The total number of queries must also account for the initial
  1551  	// QueryChannelRange message itself.
  1552  	numTotalQueries := 1 + numQueryResponses
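        	// For example, if maxUndelayedQueryReplies were 10 (a
        	// hypothetical value; the actual one was read from syncer1's
        	// config above), then numQueryResponses = 10 + 2 - 1 = 11 and
        	// numTotalQueries = 12.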
  1553  
  1554  	// The total number of channels each syncer needs to request must be
  1555  	// scaled by the chunk size being used.
  1556  	numTotalChans := numQueryResponses * chunkSize
  1557  
  1558  	// Construct enough channels so that every query can be fully
  1559  	// populated. Since syncer1 won't know of any channels, the two sets
  1560  	// are inherently disjoint.
  1561  	var syncer2Chans []lnwire.ShortChannelID
  1562  	for i := 0; i < numTotalChans; i++ {
  1563  		syncer2Chans = append([]lnwire.ShortChannelID{
  1564  			{
  1565  				BlockHeight: highestID.BlockHeight - uint32(i) - 1,
  1566  				TxIndex:     uint32(i),
  1567  			},
  1568  		}, syncer2Chans...)
  1569  	}
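        	// Note that since each new ID is prepended while its block
        	// height decreases with i, syncer2Chans ends up sorted in
        	// ascending block height order.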
  1570  
  1571  	// We'll kick off the test by asserting that syncer1 sends the
  1572  	// QueryChannelRange message over to the other node.
  1573  	select {
  1574  	case <-time.After(time.Second * 2):
  1575  		t.Fatalf("didn't get msg from syncer1")
  1576  
  1577  	case msgs := <-msgChan1:
  1578  		for _, msg := range msgs {
  1579  			// The message MUST be a QueryChannelRange message.
  1580  			_, ok := msg.(*lnwire.QueryChannelRange)
  1581  			if !ok {
				t.Fatalf("wrong message: expected "+
					"QueryChannelRange, got %T", msg)
  1584  			}
  1585  
  1586  			select {
  1587  			case <-time.After(time.Second * 2):
  1588  				t.Fatalf("node 2 didn't read msg")
  1589  
  1590  			case syncer2.queryMsgs <- msg:
  1591  
  1592  			}
  1593  		}
  1594  	}
  1595  
  1596  	// At this point, we'll need to send a response from syncer2's channel
  1597  	// series. This will cause syncer1 to simply request the entire set of
  1598  	// channels from syncer2. This will count as the first undelayed
  1599  	// response for syncer2.
  1600  	select {
  1601  	case <-time.After(time.Second * 2):
  1602  		t.Fatalf("no query recvd")
  1603  
  1604  	case <-chanSeries2.filterRangeReqs:
  1605  		// We'll send back all the channels that it should know of.
  1606  		chanSeries2.filterRangeResp <- syncer2Chans
  1607  	}
  1608  
  1609  	// At this point, we'll assert that the ReplyChannelRange message is
  1610  	// sent by syncer2.
  1611  	for i := 0; i < numQueryResponses; i++ {
  1612  		select {
  1613  		case <-time.After(time.Second * 2):
  1614  			t.Fatalf("didn't get msg from syncer2")
  1615  
  1616  		case msgs := <-msgChan2:
  1617  			for _, msg := range msgs {
  1618  				// The message MUST be a ReplyChannelRange message.
  1619  				_, ok := msg.(*lnwire.ReplyChannelRange)
  1620  				if !ok {
					t.Fatalf("wrong message: expected "+
						"ReplyChannelRange, got %T", msg)
  1623  				}
  1624  
  1625  				select {
  1626  				case <-time.After(time.Second * 2):
					t.Fatalf("node 1 didn't read msg")
  1628  
  1629  				case syncer1.gossipMsgs <- msg:
  1630  				}
  1631  			}
  1632  		}
  1633  	}
  1634  
  1635  	// We'll now have syncer1 process the chan IDs received from syncer2.
  1636  	select {
  1637  	case <-time.After(time.Second * 2):
  1638  		t.Fatalf("no query recvd")
  1639  
  1640  	case <-chanSeries1.filterReq:
  1641  		chanSeries1.filterResp <- syncer2Chans
  1642  	}
  1643  
  1644  	// At this point, syncer1 should start to send out initial requests to
  1645  	// query the chan IDs of the remote party. We'll keep track of the
  1646  	// number of queries made using the iterated value, which starts at one
  1647  	// due to the initial contribution of the QueryChannelRange msg.
  1648  	for i := 1; i < numTotalQueries; i++ {
  1649  		expDelayResponse := i >= numUndelayedQueries
  1650  		queryBatch(t,
  1651  			msgChan1, msgChan2,
  1652  			syncer1, syncer2,
  1653  			chanSeries2,
  1654  			expDelayResponse,
  1655  			delayedQueryInterval,
  1656  			delayTolerance,
  1657  		)
  1658  	}
  1659  }
  1660  
  1661  // queryBatch is a helper method that will query for a single batch of channels
  1662  // from a peer and assert the responses. The method can also be used to assert
  1663  // that the same transition happens, but delayed by the remote peer's DOS
  1664  // rate-limiting. The provided chanSeries should belong to syncer2.
  1665  //
  1666  // The state transition performed is the following:
  1667  //
  1668  //	syncer1  -- QueryShortChanIDs -->   syncer2
  1669  //	                                    chanSeries.FetchChanAnns()
  1670  //	syncer1 <-- ReplyShortChanIDsEnd -- syncer2
  1671  //
  1672  // If expDelayResponse is true, this method will assert that the call to
  1673  // FetchChanAnns happens within:
  1674  //
  1675  //	[delayedQueryInterval-delayTolerance, delayedQueryInterval+delayTolerance].
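        //
        // For example, with the 200ms delayTolerance used above and a
        // delayedQueryInterval of, say, 5s (hypothetical; the actual value
        // comes from the syncer's config), the reply would have to arrive
        // between 4.8s and 5.2s after the query.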
  1676  func queryBatch(t *testing.T,
  1677  	msgChan1, msgChan2 chan []lnwire.Message,
  1678  	syncer1, syncer2 *GossipSyncer,
  1679  	chanSeries *mockChannelGraphTimeSeries,
  1680  	expDelayResponse bool,
  1681  	delayedQueryInterval, delayTolerance time.Duration) {
  1682  
  1683  	t.Helper()
  1684  
  1685  	// First, we'll assert that syncer1 sends a QueryShortChanIDs message to
  1686  	// the remote peer.
  1687  	select {
  1688  	case <-time.After(time.Second * 2):
		t.Fatalf("didn't get msg from syncer1")
  1690  
  1691  	case msgs := <-msgChan1:
  1692  		for _, msg := range msgs {
  1693  			// The message MUST be a QueryShortChanIDs message.
  1694  			_, ok := msg.(*lnwire.QueryShortChanIDs)
  1695  			if !ok {
				t.Fatalf("wrong message: expected "+
					"QueryShortChanIDs, got %T", msg)
  1698  			}
  1699  
  1700  			select {
  1701  			case <-time.After(time.Second * 2):
  1702  				t.Fatalf("node 2 didn't read msg")
  1703  
  1704  			case syncer2.queryMsgs <- msg:
  1705  			}
  1706  		}
  1707  	}
  1708  
  1709  	// We'll then respond with an empty set of replies (as the contents
  1710  	// don't affect the test).
  1711  	switch {
  1712  
  1713  	// If this query has surpassed the undelayed query threshold, we will
  1714  	// impose stricter timing constraints on the response times. We'll first
  1715  	// test that syncer2's chanSeries doesn't immediately receive a query,
  1716  	// and then check that the query hasn't gone unanswered entirely.
  1717  	case expDelayResponse:
  1718  		// Create a before and after timeout to test, our test
  1719  		// will ensure the messages are delivered to the peer
  1720  		// in this timeframe.
  1721  		before := time.After(
  1722  			delayedQueryInterval - delayTolerance,
  1723  		)
  1724  		after := time.After(
  1725  			delayedQueryInterval + delayTolerance,
  1726  		)
  1727  
  1728  		// First, ensure syncer2 doesn't try to respond up until the
  1729  		// before time fires.
  1730  		select {
  1731  		case <-before:
  1732  			// Query is delayed, proceed.
  1733  
  1734  		case <-chanSeries.annReq:
  1735  			t.Fatalf("DOSy query was not delayed")
  1736  		}
  1737  
  1738  		// If syncer2 doesn't attempt a response within the allowed
  1739  		// interval, then the messages are probably lost.
  1740  		select {
  1741  		case <-after:
  1742  			t.Fatalf("no delayed query received")
  1743  
  1744  		case <-chanSeries.annReq:
  1745  			chanSeries.annResp <- []lnwire.Message{}
  1746  		}
  1747  
  1748  	// Otherwise, syncer2 should query its chanSeries promptly.
  1749  	default:
  1750  		select {
  1751  		case <-time.After(50 * time.Millisecond):
  1752  			t.Fatalf("no query recvd")
  1753  
  1754  		case <-chanSeries.annReq:
  1755  			chanSeries.annResp <- []lnwire.Message{}
  1756  		}
  1757  	}
  1758  
  1759  	// Finally, assert that syncer2 replies to syncer1 with a
  1760  	// ReplyShortChanIDsEnd.
  1761  	select {
  1762  	case <-time.After(50 * time.Millisecond):
  1763  		t.Fatalf("didn't get msg from syncer2")
  1764  
  1765  	case msgs := <-msgChan2:
  1766  		for _, msg := range msgs {
  1767  			// The message MUST be a ReplyShortChanIDsEnd message.
  1768  			_, ok := msg.(*lnwire.ReplyShortChanIDsEnd)
  1769  			if !ok {
				t.Fatalf("wrong message: expected "+
					"ReplyShortChanIDsEnd, got %T", msg)
  1772  			}
  1773  
  1774  			select {
  1775  			case <-time.After(time.Second * 2):
				t.Fatalf("node 1 didn't read msg")
  1777  
  1778  			case syncer1.gossipMsgs <- msg:
  1779  			}
  1780  		}
  1781  	}
  1782  }
  1783  
  1784  // TestGossipSyncerRoutineSync tests all state transitions of the main syncer
  1785  // goroutine. This ensures that given an encounter with a peer that has a set
  1786  // of distinct channels, then we'll properly synchronize our channel state with
  1787  // them.
  1788  func TestGossipSyncerRoutineSync(t *testing.T) {
  1789  	t.Parallel()
  1790  
  1791  	// We'll modify the chunk size to be a smaller value, so we can ensure
  1792  	// our chunk parsing works properly. With this value we should get 3
  1793  	// queries: two full chunks, and one lingering chunk.
  1794  	const chunkSize = 2
  1795  
  1796  	// First, we'll create two GossipSyncer instances with a canned
  1797  	// sendToPeer message to allow us to intercept their potential sends.
  1798  	highestID := lnwire.ShortChannelID{
  1799  		BlockHeight: 1144,
  1800  	}
  1801  	msgChan1, syncer1, chanSeries1 := newTestSyncer(
  1802  		highestID, defaultEncoding, chunkSize, true, false,
  1803  	)
  1804  	syncer1.Start()
  1805  	defer syncer1.Stop()
  1806  
  1807  	msgChan2, syncer2, chanSeries2 := newTestSyncer(
  1808  		highestID, defaultEncoding, chunkSize, false, true,
  1809  	)
  1810  	syncer2.Start()
  1811  	defer syncer2.Stop()
  1812  
  1813  	// Although both nodes are at the same height, syncer2 will have 3
  1814  	// chan IDs that syncer1 doesn't know of.
  1815  	syncer2Chans := []lnwire.ShortChannelID{
  1816  		{BlockHeight: highestID.BlockHeight - 3},
  1817  		{BlockHeight: highestID.BlockHeight - 2},
  1818  		{BlockHeight: highestID.BlockHeight - 1},
  1819  	}
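        	// These IDs sit just below the common chain tip, in ascending
        	// block height order.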
  1820  
  1821  	// We'll kick off the test by passing over the QueryChannelRange
  1822  	// messages from syncer1 to syncer2.
  1823  	select {
  1824  	case <-time.After(time.Second * 2):
  1825  		t.Fatalf("didn't get msg from syncer1")
  1826  
  1827  	case msgs := <-msgChan1:
  1828  		for _, msg := range msgs {
  1829  			// The message MUST be a QueryChannelRange message.
  1830  			_, ok := msg.(*lnwire.QueryChannelRange)
  1831  			if !ok {
				t.Fatalf("wrong message: expected "+
					"QueryChannelRange, got %T", msg)
  1834  			}
  1835  
  1836  			select {
  1837  			case <-time.After(time.Second * 2):
  1838  				t.Fatalf("node 2 didn't read msg")
  1839  
  1840  			case syncer2.queryMsgs <- msg:
  1841  
  1842  			}
  1843  		}
  1844  	}
  1845  
  1846  	// At this point, we'll need to send a response from syncer2 to syncer1
  1847  	// using syncer2's channels. This will cause syncer1 to simply request
  1848  	// the entire set of channels from the other node.
  1849  	select {
  1850  	case <-time.After(time.Second * 2):
  1851  		t.Fatalf("no query recvd")
  1852  
  1853  	case <-chanSeries2.filterRangeReqs:
  1854  		// We'll send back all the channels that it should know of.
  1855  		chanSeries2.filterRangeResp <- syncer2Chans
  1856  	}
  1857  
  1858  	// At this point, we'll assert that syncer2 replies with the
  1859  	// ReplyChannelRange messages. Two replies are expected since the chunk
  1860  	// size is 2, and we need to query for 3 channels.
  1861  	for i := 0; i < chunkSize; i++ {
  1862  		select {
  1863  		case <-time.After(time.Second * 2):
  1864  			t.Fatalf("didn't get msg from syncer2")
  1865  
  1866  		case msgs := <-msgChan2:
  1867  			for _, msg := range msgs {
  1868  				// The message MUST be a ReplyChannelRange message.
  1869  				_, ok := msg.(*lnwire.ReplyChannelRange)
  1870  				if !ok {
					t.Fatalf("wrong message: expected "+
						"ReplyChannelRange, got %T", msg)
  1873  				}
  1874  
  1875  				select {
  1876  				case <-time.After(time.Second * 2):
					t.Fatalf("node 1 didn't read msg")
  1878  
  1879  				case syncer1.gossipMsgs <- msg:
  1880  				}
  1881  			}
  1882  		}
  1883  	}
  1884  
  1885  	// We'll now answer syncer1's filter request, marking all IDs as new.
  1886  	select {
  1887  	case <-time.After(time.Second * 2):
  1888  		t.Fatalf("no query recvd")
  1889  
  1890  	case <-chanSeries1.filterReq:
  1891  		chanSeries1.filterResp <- syncer2Chans
  1892  	}
  1893  
  1894  	// At this point, syncer1 should start to send out initial requests to
  1895  	// query the chan IDs of the remote party. As the chunk size is 2,
  1896  	// they'll need 2 rounds in order to fully reconcile the state.
  1897  	for i := 0; i < chunkSize; i++ {
  1898  		queryBatch(t,
  1899  			msgChan1, msgChan2,
  1900  			syncer1, syncer2,
  1901  			chanSeries2,
  1902  			false, 0, 0,
  1903  		)
  1904  	}
  1905  
  1906  	// At this stage syncer1 should now be sending over its initial
  1907  	// GossipTimestampRange messages, as it should be fully synced.
  1908  	select {
  1909  	case <-time.After(time.Second * 2):
  1910  		t.Fatalf("didn't get msg from syncer1")
  1911  
  1912  	case msgs := <-msgChan1:
  1913  		for _, msg := range msgs {
  1914  			// The message MUST be a GossipTimestampRange message.
  1915  			_, ok := msg.(*lnwire.GossipTimestampRange)
  1916  			if !ok {
				t.Fatalf("wrong message: expected "+
					"GossipTimestampRange, got %T", msg)
  1919  			}
  1920  
  1921  			select {
  1922  			case <-time.After(time.Second * 2):
  1923  				t.Fatalf("node 2 didn't read msg")
  1924  
  1925  			case syncer2.gossipMsgs <- msg:
  1926  
  1927  			}
  1928  		}
  1929  	}
  1930  }
  1931  
  1932  // TestGossipSyncerAlreadySynced tests that if we attempt to synchronize two
  1933  // syncers that have the exact same state, then they'll skip straight to the
  1934  // final state and not perform any channel queries.
  1935  func TestGossipSyncerAlreadySynced(t *testing.T) {
  1936  	t.Parallel()
  1937  
  1938  	// We'll modify the chunk size to be a smaller value, so we can ensure
  1939  	// our chunk parsing works properly. With 3 known channels and a chunk
  1940  	// size of 2, each side will reply with two chunks.
  1941  	const chunkSize = 2
  1942  	const numChans = 3
  1943  
  1944  	// First, we'll create two GossipSyncer instances with a canned
  1945  	// sendToPeer message to allow us to intercept their potential sends.
  1946  	highestID := lnwire.ShortChannelID{
  1947  		BlockHeight: 1144,
  1948  	}
  1949  	msgChan1, syncer1, chanSeries1 := newTestSyncer(
  1950  		highestID, defaultEncoding, chunkSize,
  1951  	)
  1952  	syncer1.Start()
  1953  	defer syncer1.Stop()
  1954  
  1955  	msgChan2, syncer2, chanSeries2 := newTestSyncer(
  1956  		highestID, defaultEncoding, chunkSize,
  1957  	)
  1958  	syncer2.Start()
  1959  	defer syncer2.Stop()
  1960  
  1961  	// The channel state of both syncers will be identical. They should
  1962  	// recognize this, and skip the sync phase below.
  1963  	var syncer1Chans, syncer2Chans []lnwire.ShortChannelID
  1964  	for i := numChans; i > 0; i-- {
  1965  		shortChanID := lnwire.ShortChannelID{
  1966  			BlockHeight: highestID.BlockHeight - uint32(i),
  1967  		}
  1968  		syncer1Chans = append(syncer1Chans, shortChanID)
  1969  		syncer2Chans = append(syncer2Chans, shortChanID)
  1970  	}
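        	// Both slices are identical and sorted in ascending block
        	// height order.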
  1971  
  1972  	// We'll now kick off the test by allowing both sides to send their
  1973  	// QueryChannelRange messages to each other.
  1974  	select {
  1975  	case <-time.After(time.Second * 2):
  1976  		t.Fatalf("didn't get msg from syncer1")
  1977  
  1978  	case msgs := <-msgChan1:
  1979  		for _, msg := range msgs {
  1980  			// The message MUST be a QueryChannelRange message.
  1981  			_, ok := msg.(*lnwire.QueryChannelRange)
  1982  			if !ok {
				t.Fatalf("wrong message: expected "+
					"QueryChannelRange, got %T", msg)
  1985  			}
  1986  
  1987  			select {
  1988  			case <-time.After(time.Second * 2):
  1989  				t.Fatalf("node 2 didn't read msg")
  1990  
  1991  			case syncer2.queryMsgs <- msg:
  1992  
  1993  			}
  1994  		}
  1995  	}
  1996  	select {
  1997  	case <-time.After(time.Second * 2):
  1998  		t.Fatalf("didn't get msg from syncer2")
  1999  
  2000  	case msgs := <-msgChan2:
  2001  		for _, msg := range msgs {
  2002  			// The message MUST be a QueryChannelRange message.
  2003  			_, ok := msg.(*lnwire.QueryChannelRange)
  2004  			if !ok {
				t.Fatalf("wrong message: expected "+
					"QueryChannelRange, got %T", msg)
  2007  			}
  2008  
  2009  			select {
  2010  			case <-time.After(time.Second * 2):
				t.Fatalf("node 1 didn't read msg")
  2012  
  2013  			case syncer1.queryMsgs <- msg:
  2014  
  2015  			}
  2016  		}
  2017  	}
  2018  
  2019  	// We'll now send back the range each side should send over: the set of
  2020  	// channels they already know about.
  2021  	select {
  2022  	case <-time.After(time.Second * 2):
  2023  		t.Fatalf("no query recvd")
  2024  
  2025  	case <-chanSeries1.filterRangeReqs:
  2026  		// We'll send all the channels that it should know of.
  2027  		chanSeries1.filterRangeResp <- syncer1Chans
  2028  	}
  2029  	select {
  2030  	case <-time.After(time.Second * 2):
  2031  		t.Fatalf("no query recvd")
  2032  
  2033  	case <-chanSeries2.filterRangeReqs:
  2034  		// We'll send back all the channels that it should know of.
  2035  		chanSeries2.filterRangeResp <- syncer2Chans
  2036  	}
  2037  
  2038  	// Next, we'll thread through the replies of both parties. As the chunk
  2039  	// size is 2, and they both know of 3 channels, each side will reply
  2040  	// with two chunks.
  2041  	for i := 0; i < chunkSize; i++ {
  2042  		select {
  2043  		case <-time.After(time.Second * 2):
  2044  			t.Fatalf("didn't get msg from syncer1")
  2045  
  2046  		case msgs := <-msgChan1:
  2047  			for _, msg := range msgs {
  2048  				// The message MUST be a ReplyChannelRange message.
  2049  				_, ok := msg.(*lnwire.ReplyChannelRange)
  2050  				if !ok {
					t.Fatalf("wrong message: expected "+
						"ReplyChannelRange, got %T", msg)
  2053  				}
  2054  
  2055  				select {
  2056  				case <-time.After(time.Second * 2):
  2057  					t.Fatalf("node 2 didn't read msg")
  2058  
  2059  				case syncer2.gossipMsgs <- msg:
  2060  				}
  2061  			}
  2062  		}
  2063  	}
  2064  	for i := 0; i < chunkSize; i++ {
  2065  		select {
  2066  		case <-time.After(time.Second * 2):
  2067  			t.Fatalf("didn't get msg from syncer2")
  2068  
  2069  		case msgs := <-msgChan2:
  2070  			for _, msg := range msgs {
  2071  				// The message MUST be a ReplyChannelRange message.
  2072  				_, ok := msg.(*lnwire.ReplyChannelRange)
  2073  				if !ok {
					t.Fatalf("wrong message: expected "+
						"ReplyChannelRange, got %T", msg)
  2076  				}
  2077  
  2078  				select {
  2079  				case <-time.After(time.Second * 2):
					t.Fatalf("node 1 didn't read msg")
  2081  
  2082  				case syncer1.gossipMsgs <- msg:
  2083  				}
  2084  			}
  2085  		}
  2086  	}
  2087  
  2088  	// Now that both sides have the full responses, we'll send over the
  2089  	// channels that they need to filter out. As both sides have the exact
  2090  	// same set of channels, they should skip to the final state.
  2091  	select {
  2092  	case <-time.After(time.Second * 2):
  2093  		t.Fatalf("no query recvd")
  2094  
  2095  	case <-chanSeries1.filterReq:
  2096  		chanSeries1.filterResp <- []lnwire.ShortChannelID{}
  2097  	}
  2098  	select {
  2099  	case <-time.After(time.Second * 2):
  2100  		t.Fatalf("no query recvd")
  2101  
  2102  	case <-chanSeries2.filterReq:
  2103  		chanSeries2.filterResp <- []lnwire.ShortChannelID{}
  2104  	}
  2105  
  2106  	// As both parties are already synced, the next message they send to
  2107  	// each other should be the GossipTimestampRange message.
  2108  	select {
  2109  	case <-time.After(time.Second * 2):
  2110  		t.Fatalf("didn't get msg from syncer1")
  2111  
  2112  	case msgs := <-msgChan1:
  2113  		for _, msg := range msgs {
  2114  			// The message MUST be a GossipTimestampRange message.
  2115  			_, ok := msg.(*lnwire.GossipTimestampRange)
  2116  			if !ok {
				t.Fatalf("wrong message: expected "+
					"GossipTimestampRange, got %T", msg)
  2119  			}
  2120  
  2121  			select {
  2122  			case <-time.After(time.Second * 2):
  2123  				t.Fatalf("node 2 didn't read msg")
  2124  
  2125  			case syncer2.gossipMsgs <- msg:
  2126  
  2127  			}
  2128  		}
  2129  	}
  2130  	select {
  2131  	case <-time.After(time.Second * 2):
		t.Fatalf("didn't get msg from syncer2")
  2133  
  2134  	case msgs := <-msgChan2:
  2135  		for _, msg := range msgs {
  2136  			// The message MUST be a GossipTimestampRange message.
  2137  			_, ok := msg.(*lnwire.GossipTimestampRange)
  2138  			if !ok {
				t.Fatalf("wrong message: expected "+
					"GossipTimestampRange, got %T", msg)
  2141  			}
  2142  
  2143  			select {
  2144  			case <-time.After(time.Second * 2):
				t.Fatalf("node 1 didn't read msg")
  2146  
  2147  			case syncer1.gossipMsgs <- msg:
  2148  
  2149  			}
  2150  		}
  2151  	}
  2152  }
  2153  
  2154  // TestGossipSyncerSyncTransitions ensures that the gossip syncer properly
  2155  // carries out its duties when accepting a new sync transition request.
  2156  func TestGossipSyncerSyncTransitions(t *testing.T) {
  2157  	t.Parallel()
  2158  
  2159  	assertMsgSent := func(t *testing.T, msgChan chan []lnwire.Message,
  2160  		msg lnwire.Message) {
  2161  
  2162  		t.Helper()
  2163  
  2164  		var msgSent lnwire.Message
  2165  		select {
  2166  		case msgs := <-msgChan:
  2167  			if len(msgs) != 1 {
  2168  				t.Fatalf("expected to send a single message at "+
  2169  					"a time, got %d", len(msgs))
  2170  			}
  2171  			msgSent = msgs[0]
  2172  		case <-time.After(time.Second):
  2173  			t.Fatalf("expected to send %T message", msg)
  2174  		}
  2175  
  2176  		if !reflect.DeepEqual(msgSent, msg) {
  2177  			t.Fatalf("expected to send message: %v\ngot: %v",
  2178  				spew.Sdump(msg), spew.Sdump(msgSent))
  2179  		}
  2180  	}
  2181  
  2182  	tests := []struct {
  2183  		name          string
  2184  		entrySyncType SyncerType
  2185  		finalSyncType SyncerType
  2186  		assert        func(t *testing.T, msgChan chan []lnwire.Message,
  2187  			syncer *GossipSyncer)
  2188  	}{
  2189  		{
  2190  			name:          "active to passive",
  2191  			entrySyncType: ActiveSync,
  2192  			finalSyncType: PassiveSync,
  2193  			assert: func(t *testing.T, msgChan chan []lnwire.Message,
  2194  				g *GossipSyncer) {
  2195  
  2196  				// The processing loop of the syncer sends an
  2197  				// initial message to start receiving updates.
  2198  				// Consume it.
  2199  				firstTimestamp := uint32(time.Now().Unix())
  2200  				assertMsgSent(t, msgChan, &lnwire.GossipTimestampRange{
  2201  					FirstTimestamp: firstTimestamp,
  2202  					TimestampRange: math.MaxUint32,
  2203  				})
  2204  
  2205  				// When transitioning from active to passive, we
  2206  				// should expect to see a new local update
  2207  				// horizon sent to the remote peer, indicating
  2208  				// that we no longer wish to receive any future
  2209  				// updates.
  2210  				assertMsgSent(t, msgChan, &lnwire.GossipTimestampRange{
  2211  					FirstTimestamp: uint32(zeroTimestamp.Unix()),
  2212  					TimestampRange: 0,
  2213  				})
  2214  
  2215  				syncState := g.syncState()
  2216  				if syncState != chansSynced {
  2217  					t.Fatalf("expected syncerState %v, "+
  2218  						"got %v", chansSynced, syncState)
  2219  				}
  2220  			},
  2221  		},
  2222  		{
  2223  			name:          "passive to active",
  2224  			entrySyncType: PassiveSync,
  2225  			finalSyncType: ActiveSync,
  2226  			assert: func(t *testing.T, msgChan chan []lnwire.Message,
  2227  				g *GossipSyncer) {
  2228  
  2229  				// When transitioning from passive to active,
  2230  				// we should expect to see a new local update
  2231  				// horizon sent to the remote peer, indicating
  2232  				// that we would like to receive any future
  2233  				// updates.
  2234  				firstTimestamp := uint32(time.Now().Unix())
  2235  				assertMsgSent(t, msgChan, &lnwire.GossipTimestampRange{
  2236  					FirstTimestamp: firstTimestamp,
  2237  					TimestampRange: math.MaxUint32,
  2238  				})
  2239  
  2240  				syncState := g.syncState()
  2241  				if syncState != chansSynced {
  2242  					t.Fatalf("expected syncerState %v, "+
  2243  						"got %v", chansSynced, syncState)
  2244  				}
  2245  			},
  2246  		},
  2247  	}
  2248  
  2249  	for _, test := range tests {
  2250  		test := test
  2251  		t.Run(test.name, func(t *testing.T) {
  2252  			t.Parallel()
  2253  
  2254  			// We'll start each test by creating our syncer. We'll
  2255  			// initialize it with a state of chansSynced, as that's
  2256  			// the only time when it can process sync transitions.
  2257  			msgChan, syncer, _ := newTestSyncer(
  2258  				lnwire.ShortChannelID{
  2259  					BlockHeight: latestKnownHeight,
  2260  				},
  2261  				defaultEncoding, defaultChunkSize,
  2262  			)
  2263  			syncer.setSyncState(chansSynced)
  2264  
  2265  			// We'll set the initial syncType to what the test
  2266  			// demands.
  2267  			syncer.setSyncType(test.entrySyncType)
  2268  
  2269  			// We'll then start the syncer in order to process the
  2270  			// request.
  2271  			syncer.Start()
  2272  			defer syncer.Stop()
  2273  
  2274  			syncer.ProcessSyncTransition(test.finalSyncType)
  2275  
  2276  			// The syncer should now have the expected final
  2277  			// SyncerType that the test expects.
  2278  			syncType := syncer.SyncType()
  2279  			if syncType != test.finalSyncType {
  2280  				t.Fatalf("expected syncType %v, got %v",
  2281  					test.finalSyncType, syncType)
  2282  			}
  2283  
  2284  			// Finally, we'll run a set of assertions for each test
  2285  			// to ensure the syncer performed its expected duties
  2286  			// after processing its sync transition.
  2287  			test.assert(t, msgChan, syncer)
  2288  		})
  2289  	}
  2290  }
  2291  
  2292  // TestGossipSyncerHistoricalSync tests that a gossip syncer can perform a
  2293  // historical sync with the remote peer.
  2294  func TestGossipSyncerHistoricalSync(t *testing.T) {
  2295  	t.Parallel()
  2296  
  2297  	// We'll create a new gossip syncer and manually override its state to
  2298  	// chansSynced. This is necessary as the syncer can only process
  2299  	// historical sync requests in this state.
  2300  	msgChan, syncer, _ := newTestSyncer(
  2301  		lnwire.ShortChannelID{BlockHeight: latestKnownHeight},
  2302  		defaultEncoding, defaultChunkSize,
  2303  	)
  2304  	syncer.setSyncType(PassiveSync)
  2305  	syncer.setSyncState(chansSynced)
  2306  
  2307  	syncer.Start()
  2308  	defer syncer.Stop()
  2309  
  2310  	syncer.historicalSync()
  2311  
  2312  	// We should expect to see a single lnwire.QueryChannelRange message
  2313  	// sent to the remote peer with a FirstBlockHeight of 0.
  2314  	expectedMsg := &lnwire.QueryChannelRange{
  2315  		FirstBlockHeight: 0,
  2316  		NumBlocks:        latestKnownHeight,
  2317  	}
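        	// That is, a historical sync re-queries the entire range from
        	// the genesis block up to the latest known height.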
  2318  
  2319  	select {
  2320  	case msgs := <-msgChan:
  2321  		if len(msgs) != 1 {
  2322  			t.Fatalf("expected to send a single "+
  2323  				"lnwire.QueryChannelRange message, got %d",
  2324  				len(msgs))
  2325  		}
  2326  		if !reflect.DeepEqual(msgs[0], expectedMsg) {
  2327  			t.Fatalf("expected to send message: %v\ngot: %v",
  2328  				spew.Sdump(expectedMsg), spew.Sdump(msgs[0]))
  2329  		}
  2330  	case <-time.After(time.Second):
		t.Fatalf("expected to send an lnwire.QueryChannelRange message")
  2332  	}
  2333  }
  2334  
  2335  // TestGossipSyncerSyncedSignal ensures that we receive a signal when a gossip
  2336  // syncer reaches its terminal chansSynced state.
  2337  func TestGossipSyncerSyncedSignal(t *testing.T) {
  2338  	t.Parallel()
  2339  
  2340  	// We'll create a new gossip syncer and manually override its state to
  2341  	// chansSynced.
  2342  	_, syncer, _ := newTestSyncer(
  2343  		lnwire.NewShortChanIDFromInt(10), defaultEncoding,
  2344  		defaultChunkSize,
  2345  	)
  2346  	syncer.setSyncState(chansSynced)
  2347  
  2348  	// We'll go ahead and request a signal to be notified of when it reaches
  2349  	// this state.
  2350  	signalChan := syncer.ResetSyncedSignal()
  2351  
  2352  	// Starting the gossip syncer should cause the signal to be delivered.
  2353  	syncer.Start()
  2354  
  2355  	select {
  2356  	case <-signalChan:
  2357  	case <-time.After(time.Second):
  2358  		t.Fatal("expected to receive chansSynced signal")
  2359  	}
  2360  
  2361  	syncer.Stop()
  2362  
  2363  	// We'll try this again, but this time we'll request the signal after
  2364  	// the syncer is active and has already reached its chansSynced state.
  2365  	_, syncer, _ = newTestSyncer(
  2366  		lnwire.NewShortChanIDFromInt(10), defaultEncoding,
  2367  		defaultChunkSize,
  2368  	)
  2369  
  2370  	syncer.setSyncState(chansSynced)
  2371  
  2372  	syncer.Start()
  2373  	defer syncer.Stop()
  2374  
  2375  	signalChan = syncer.ResetSyncedSignal()
  2376  
  2377  	// The signal should be delivered immediately.
  2378  	select {
  2379  	case <-signalChan:
  2380  	case <-time.After(time.Second):
  2381  		t.Fatal("expected to receive chansSynced signal")
  2382  	}
  2383  }
  2384  
  2385  // TestGossipSyncerMaxChannelRangeReplies ensures that a gossip syncer
  2386  // transitions its state after receiving the maximum possible number of replies
  2387  // for a single QueryChannelRange message, and that any further replies after
  2388  // said limit are not processed.
  2389  func TestGossipSyncerMaxChannelRangeReplies(t *testing.T) {
  2390  	t.Parallel()
  2391  
  2392  	msgChan, syncer, chanSeries := newTestSyncer(
  2393  		lnwire.ShortChannelID{BlockHeight: latestKnownHeight},
  2394  		defaultEncoding, defaultChunkSize,
  2395  	)
  2396  
  2397  	// We'll tune the maxQueryChanRangeReplies to a more sensible value for
  2398  	// the sake of testing.
  2399  	syncer.cfg.maxQueryChanRangeReplies = 100
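        	// (Presumably much smaller than the production default; it
        	// keeps the reply loop below fast.)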
  2400  
  2401  	syncer.Start()
  2402  	defer syncer.Stop()
  2403  
  2404  	// Upon initialization, the syncer should submit a QueryChannelRange
  2405  	// request.
  2406  	var query *lnwire.QueryChannelRange
  2407  	select {
  2408  	case msgs := <-msgChan:
  2409  		require.Len(t, msgs, 1)
  2410  		require.IsType(t, &lnwire.QueryChannelRange{}, msgs[0])
  2411  		query = msgs[0].(*lnwire.QueryChannelRange)
  2412  
  2413  	case <-time.After(time.Second):
  2414  		t.Fatal("expected query channel range request msg")
  2415  	}
  2416  
  2417  	// We'll send the maximum number of replies allowed to a
  2418  	// QueryChannelRange request with each reply consuming only one block in
  2419  	// order to transition the syncer's state.
  2420  	for i := uint32(0); i < syncer.cfg.maxQueryChanRangeReplies; i++ {
  2421  		reply := &lnwire.ReplyChannelRange{
  2422  			ChainHash:        query.ChainHash,
  2423  			FirstBlockHeight: query.FirstBlockHeight + i,
  2424  			NumBlocks:        1,
  2425  			ShortChanIDs: []lnwire.ShortChannelID{
  2426  				{
  2427  					BlockHeight: query.FirstBlockHeight + i,
  2428  				},
  2429  			},
  2430  		}
  2433  		require.NoError(t, syncer.ProcessQueryMsg(reply, nil))
  2434  	}
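        	// Each reply above covers a single block, so the replies span
        	// only part of the queried range; it is the reply cap, rather
        	// than full range coverage, that completes the query here.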
  2435  
  2436  	// We should receive a filter request for the syncer's local channels
  2437  	// after processing all of the replies. We'll send back a nil response
  2438  	// indicating that no new channels need to be synced, so it should
  2439  	// transition to its final chansSynced state.
  2440  	select {
  2441  	case <-chanSeries.filterReq:
  2442  	case <-time.After(time.Second):
  2443  		t.Fatal("expected local filter request of known channels")
  2444  	}
  2445  	select {
  2446  	case chanSeries.filterResp <- nil:
  2447  	case <-time.After(time.Second):
  2448  		t.Fatal("timed out sending filter response")
  2449  	}
  2450  	assertSyncerStatus(t, syncer, chansSynced, ActiveSync)
  2451  
  2452  	// Finally, attempting to process another reply for the same query
  2453  	// should result in an error.
  2454  	require.Error(t, syncer.ProcessQueryMsg(&lnwire.ReplyChannelRange{
  2455  		ChainHash:        query.ChainHash,
  2456  		FirstBlockHeight: query.FirstBlockHeight,
  2457  		NumBlocks:        query.NumBlocks,
  2458  		ShortChanIDs: []lnwire.ShortChannelID{
  2459  			{
  2460  				BlockHeight: query.LastBlockHeight() + 1,
  2461  			},
  2462  		},
  2463  	}, nil))
  2464  }