github.com/decred/dcrlnd@v0.7.6/discovery/gossiper_test.go

package discovery

import (
	"bytes"
	"encoding/hex"
	"fmt"
	"io/ioutil"
	prand "math/rand"
	"net"
	"os"
	"reflect"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/davecgh/go-spew/spew"
	"github.com/decred/dcrd/chaincfg/chainhash"
	"github.com/decred/dcrd/dcrec/secp256k1/v4"
	"github.com/decred/dcrd/dcrutil/v4"
	"github.com/decred/dcrd/wire"
	"github.com/decred/dcrlnd/batch"
	"github.com/decred/dcrlnd/chainntnfs"
	"github.com/decred/dcrlnd/channeldb"
	"github.com/decred/dcrlnd/keychain"
	"github.com/decred/dcrlnd/kvdb"
	"github.com/decred/dcrlnd/lnpeer"
	"github.com/decred/dcrlnd/lntest/mock"
	"github.com/decred/dcrlnd/lntest/wait"
	"github.com/decred/dcrlnd/lnwire"
	"github.com/decred/dcrlnd/netann"
	"github.com/decred/dcrlnd/routing"
	"github.com/decred/dcrlnd/routing/route"
	"github.com/decred/dcrlnd/ticker"
	"github.com/go-errors/errors"
	"github.com/stretchr/testify/require"
)
var (
	testAddr = &net.TCPAddr{IP: (net.IP)([]byte{0xA, 0x0, 0x0, 0x1}),
		Port: 9000}
	testAddrs    = []net.Addr{testAddr}
	testFeatures = lnwire.NewRawFeatureVector()

	testKeyLoc = keychain.KeyLocator{Family: keychain.KeyFamilyNodeKey}

	selfKeyPriv, _ = secp256k1.GeneratePrivateKey()
	selfKeyDesc    = &keychain.KeyDescriptor{
		PubKey:     selfKeyPriv.PubKey(),
		KeyLocator: testKeyLoc,
	}

	decredKeyPriv1, _ = secp256k1.GeneratePrivateKey()
	decredKeyPub1     = decredKeyPriv1.PubKey()

	remoteKeyPriv1, _ = secp256k1.GeneratePrivateKey()
	remoteKeyPub1     = remoteKeyPriv1.PubKey()

	decredKeyPriv2, _ = secp256k1.GeneratePrivateKey()
	decredKeyPub2     = decredKeyPriv2.PubKey()

	remoteKeyPriv2, _ = secp256k1.GeneratePrivateKey()

	trickleDelay     = time.Millisecond * 100
	retransmitDelay  = time.Hour * 1
	proofMatureDelta uint32

	// The test timestamp and rebroadcast interval make sure messages
	// won't be rebroadcast automatically during the tests.
	testTimestamp       = uint32(1234567890)
	rebroadcastInterval = time.Hour * 1000000
)
    74  
    75  // makeTestDB creates a new instance of the ChannelDB for testing purposes. A
    76  // callback which cleans up the created temporary directories is also returned
    77  // and intended to be executed after the test completes.
    78  func makeTestDB() (*channeldb.DB, func(), error) {
    79  	// First, create a temporary directory to be used for the duration of
    80  	// this test.
    81  	tempDirName, err := ioutil.TempDir("", "channeldb")
    82  	if err != nil {
    83  		return nil, nil, err
    84  	}
    85  
    86  	// Next, create channeldb for the first time.
    87  	cdb, err := channeldb.Open(tempDirName)
    88  	if err != nil {
    89  		return nil, nil, err
    90  	}
    91  
    92  	cleanUp := func() {
    93  		cdb.Close()
    94  		os.RemoveAll(tempDirName)
    95  	}
    96  
    97  	return cdb, cleanUp, nil
    98  }
    99  
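// mockGraphSource is an in-memory stand-in for the channel graph that
// records the nodes, edges, and policies the gossiper hands to it.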
type mockGraphSource struct {
	bestHeight uint32

	mu            sync.Mutex
	nodes         []channeldb.LightningNode
	infos         map[uint64]channeldb.ChannelEdgeInfo
	edges         map[uint64][]channeldb.ChannelEdgePolicy
	zombies       map[uint64][][33]byte
	chansToReject map[uint64]struct{}
}

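// newMockRouter creates a mockGraphSource whose best known block height is
// initialized to the given height.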
func newMockRouter(height uint32) *mockGraphSource {
	return &mockGraphSource{
		bestHeight:    height,
		infos:         make(map[uint64]channeldb.ChannelEdgeInfo),
		edges:         make(map[uint64][]channeldb.ChannelEdgePolicy),
		zombies:       make(map[uint64][][33]byte),
		chansToReject: make(map[uint64]struct{}),
	}
}

var _ routing.ChannelGraphSource = (*mockGraphSource)(nil)

func (r *mockGraphSource) AddNode(node *channeldb.LightningNode,
	_ ...batch.SchedulerOption) error {

	r.mu.Lock()
	defer r.mu.Unlock()

	r.nodes = append(r.nodes, *node)
	return nil
}

func (r *mockGraphSource) AddEdge(info *channeldb.ChannelEdgeInfo,
	_ ...batch.SchedulerOption) error {

	r.mu.Lock()
	defer r.mu.Unlock()

	if _, ok := r.infos[info.ChannelID]; ok {
		return errors.New("info already exists")
	}

	if _, ok := r.chansToReject[info.ChannelID]; ok {
		return errors.New("validation failed")
	}

	r.infos[info.ChannelID] = *info
	return nil
}

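// queueValidationFail marks the given channel ID so that any subsequent
// AddEdge call for it fails, simulating a failed validation.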
func (r *mockGraphSource) queueValidationFail(chanID uint64) {
	r.mu.Lock()
	defer r.mu.Unlock()

	r.chansToReject[chanID] = struct{}{}
}

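// UpdateEdge stores the given policy for its channel, placing it in the
// slot that matches the direction bit of the policy's channel flags.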
func (r *mockGraphSource) UpdateEdge(edge *channeldb.ChannelEdgePolicy,
	_ ...batch.SchedulerOption) error {

	r.mu.Lock()
	defer r.mu.Unlock()

	if len(r.edges[edge.ChannelID]) == 0 {
		r.edges[edge.ChannelID] = make([]channeldb.ChannelEdgePolicy, 2)
	}

	if edge.ChannelFlags&lnwire.ChanUpdateDirection == 0 {
		r.edges[edge.ChannelID][0] = *edge
	} else {
		r.edges[edge.ChannelID][1] = *edge
	}

	return nil
}

func (r *mockGraphSource) CurrentBlockHeight() (uint32, error) {
	return r.bestHeight, nil
}

func (r *mockGraphSource) AddProof(chanID lnwire.ShortChannelID,
	proof *channeldb.ChannelAuthProof) error {

	r.mu.Lock()
	defer r.mu.Unlock()

	chanIDInt := chanID.ToUint64()
	info, ok := r.infos[chanIDInt]
	if !ok {
		return errors.New("channel does not exist")
	}

	info.AuthProof = proof
	r.infos[chanIDInt] = info

	return nil
}

func (r *mockGraphSource) ForEachNode(func(node *channeldb.LightningNode) error) error {
	return nil
}

func (r *mockGraphSource) ForAllOutgoingChannels(cb func(tx kvdb.RTx,
	i *channeldb.ChannelEdgeInfo,
	c *channeldb.ChannelEdgePolicy) error) error {

	r.mu.Lock()
	defer r.mu.Unlock()

	chans := make(map[uint64]channeldb.ChannelEdge)
	for _, info := range r.infos {
		info := info

		edgeInfo := chans[info.ChannelID]
		edgeInfo.Info = &info
		chans[info.ChannelID] = edgeInfo
	}
	for _, edges := range r.edges {
		edges := edges

		edge := chans[edges[0].ChannelID]
		edge.Policy1 = &edges[0]
		chans[edges[0].ChannelID] = edge
	}

	for _, channel := range chans {
		if err := cb(nil, channel.Info, channel.Policy1); err != nil {
			return err
		}
	}

	return nil
}

func (r *mockGraphSource) ForEachChannel(func(chanInfo *channeldb.ChannelEdgeInfo,
	e1, e2 *channeldb.ChannelEdgePolicy) error) error {
	return nil
}

func (r *mockGraphSource) GetChannelByID(chanID lnwire.ShortChannelID) (
	*channeldb.ChannelEdgeInfo,
	*channeldb.ChannelEdgePolicy,
	*channeldb.ChannelEdgePolicy, error) {

	r.mu.Lock()
	defer r.mu.Unlock()

	chanIDInt := chanID.ToUint64()
	chanInfo, ok := r.infos[chanIDInt]
	if !ok {
		pubKeys, isZombie := r.zombies[chanIDInt]
		if !isZombie {
			return nil, nil, nil, channeldb.ErrEdgeNotFound
		}

		return &channeldb.ChannelEdgeInfo{
			NodeKey1Bytes: pubKeys[0],
			NodeKey2Bytes: pubKeys[1],
		}, nil, nil, channeldb.ErrZombieEdge
	}

	edges := r.edges[chanID.ToUint64()]
	if len(edges) == 0 {
		return &chanInfo, nil, nil, nil
	}

	var edge1 *channeldb.ChannelEdgePolicy
	if !reflect.DeepEqual(edges[0], channeldb.ChannelEdgePolicy{}) {
		edge1 = &edges[0]
	}

	var edge2 *channeldb.ChannelEdgePolicy
	if !reflect.DeepEqual(edges[1], channeldb.ChannelEdgePolicy{}) {
		edge2 = &edges[1]
	}

	return &chanInfo, edge1, edge2, nil
}

func (r *mockGraphSource) FetchLightningNode(
	nodePub route.Vertex) (*channeldb.LightningNode, error) {

	// Take the lock since r.nodes may be mutated concurrently by the
	// gossiper's goroutines.
	r.mu.Lock()
	defer r.mu.Unlock()

	for _, node := range r.nodes {
		if bytes.Equal(nodePub[:], node.PubKeyBytes[:]) {
			return &node, nil
		}
	}

	return nil, channeldb.ErrGraphNodeNotFound
}

// IsStaleNode returns true if the graph source has a node announcement for the
// target node with a more recent timestamp.
func (r *mockGraphSource) IsStaleNode(nodePub route.Vertex, timestamp time.Time) bool {
	r.mu.Lock()
	defer r.mu.Unlock()

	for _, node := range r.nodes {
		if node.PubKeyBytes == nodePub {
			return node.LastUpdate.After(timestamp) ||
				node.LastUpdate.Equal(timestamp)
		}
	}

	// If we did not find the node among our existing graph nodes, we
	// require the node to already have a channel in the graph to not be
	// considered stale.
	for _, info := range r.infos {
		if info.NodeKey1Bytes == nodePub {
			return false
		}
		if info.NodeKey2Bytes == nodePub {
			return false
		}
	}
	return true
}

// IsPublicNode determines whether the given vertex is seen as a public node in
// the graph from the graph's source node's point of view.
func (r *mockGraphSource) IsPublicNode(node route.Vertex) (bool, error) {
	r.mu.Lock()
	defer r.mu.Unlock()

	for _, info := range r.infos {
		if !bytes.Equal(node[:], info.NodeKey1Bytes[:]) &&
			!bytes.Equal(node[:], info.NodeKey2Bytes[:]) {
			continue
		}

		if info.AuthProof != nil {
			return true, nil
		}
	}
	return false, nil
}

// IsKnownEdge returns true if the graph source already knows of the passed
// channel ID either as a live or zombie channel.
func (r *mockGraphSource) IsKnownEdge(chanID lnwire.ShortChannelID) bool {
	r.mu.Lock()
	defer r.mu.Unlock()

	chanIDInt := chanID.ToUint64()
	_, exists := r.infos[chanIDInt]
	_, isZombie := r.zombies[chanIDInt]
	return exists || isZombie
}

// IsStaleEdgePolicy returns true if the graph source has a channel edge for
// the passed channel ID (and flags) that has a more recent timestamp.
func (r *mockGraphSource) IsStaleEdgePolicy(chanID lnwire.ShortChannelID,
	timestamp time.Time, flags lnwire.ChanUpdateChanFlags) bool {

	r.mu.Lock()
	defer r.mu.Unlock()

	chanIDInt := chanID.ToUint64()
	edges, ok := r.edges[chanIDInt]
	if !ok {
		// Since the edge doesn't exist, we'll check our zombie index as
		// well.
		_, isZombie := r.zombies[chanIDInt]
		if !isZombie {
			return false
		}

		// Since it exists within our zombie index, we'll check that it
		// respects the router's live edge horizon to determine whether
		// it is stale or not.
		return time.Since(timestamp) > routing.DefaultChannelPruneExpiry
	}

	switch {
	case flags&lnwire.ChanUpdateDirection == 0 &&
		!reflect.DeepEqual(edges[0], channeldb.ChannelEdgePolicy{}):

		return !timestamp.After(edges[0].LastUpdate)

	case flags&lnwire.ChanUpdateDirection == 1 &&
		!reflect.DeepEqual(edges[1], channeldb.ChannelEdgePolicy{}):

		return !timestamp.After(edges[1].LastUpdate)

	default:
		return false
	}
}

// MarkEdgeLive clears an edge from our zombie index, deeming it as live.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (r *mockGraphSource) MarkEdgeLive(chanID lnwire.ShortChannelID) error {
	r.mu.Lock()
	defer r.mu.Unlock()
	delete(r.zombies, chanID.ToUint64())
	return nil
}

// MarkEdgeZombie marks an edge as a zombie within our zombie index.
func (r *mockGraphSource) MarkEdgeZombie(chanID lnwire.ShortChannelID, pubKey1,
	pubKey2 [33]byte) error {

	r.mu.Lock()
	defer r.mu.Unlock()

	r.zombies[chanID.ToUint64()] = [][33]byte{pubKey1, pubKey2}

	return nil
}

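// mockNotifier is a minimal chain notifier for tests: it tracks block epoch
// clients, while confirmation and spend registrations are no-ops.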
type mockNotifier struct {
	clientCounter uint32
	epochClients  map[uint32]chan *chainntnfs.BlockEpoch

	sync.RWMutex
}

func newMockNotifier() *mockNotifier {
	return &mockNotifier{
		epochClients: make(map[uint32]chan *chainntnfs.BlockEpoch),
	}
}

func (m *mockNotifier) RegisterConfirmationsNtfn(txid *chainhash.Hash,
	_ []byte, numConfs, _ uint32) (*chainntnfs.ConfirmationEvent, error) {

	return nil, nil
}

func (m *mockNotifier) RegisterSpendNtfn(outpoint *wire.OutPoint, _ []byte,
	_ uint32) (*chainntnfs.SpendEvent, error) {
	return nil, nil
}

func (m *mockNotifier) RegisterBlockEpochNtfn(
	bestBlock *chainntnfs.BlockEpoch) (*chainntnfs.BlockEpochEvent, error) {

	// A write lock is required here since we mutate the client counter
	// and the epoch client map below.
	m.Lock()
	defer m.Unlock()

	epochChan := make(chan *chainntnfs.BlockEpoch)
	clientID := m.clientCounter
	m.clientCounter++
	m.epochClients[clientID] = epochChan

	return &chainntnfs.BlockEpochEvent{
		Epochs: epochChan,
		Cancel: func() {},
	}, nil
}

func (m *mockNotifier) Start() error {
	return nil
}

func (m *mockNotifier) Started() bool {
	return true
}

func (m *mockNotifier) Stop() error {
	return nil
}

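// annBatch holds a full set of announcements for a single test channel: a
// node announcement for each endpoint, the channel announcement itself, both
// directed channel updates, and the local and remote halves of the proof.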
type annBatch struct {
	nodeAnn1 *lnwire.NodeAnnouncement
	nodeAnn2 *lnwire.NodeAnnouncement

	chanAnn *lnwire.ChannelAnnouncement

	chanUpdAnn1 *lnwire.ChannelUpdate
	chanUpdAnn2 *lnwire.ChannelUpdate

	localProofAnn  *lnwire.AnnounceSignatures
	remoteProofAnn *lnwire.AnnounceSignatures
}

func createLocalAnnouncements(blockHeight uint32) (*annBatch, error) {
	return createAnnouncements(blockHeight, selfKeyPriv, remoteKeyPriv1)
}

func createRemoteAnnouncements(blockHeight uint32) (*annBatch, error) {
	return createAnnouncements(blockHeight, remoteKeyPriv1, remoteKeyPriv2)
}

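// createAnnouncements builds a complete annBatch for a channel at the given
// block height, signed with the two provided node keys.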
func createAnnouncements(blockHeight uint32, key1, key2 *secp256k1.PrivateKey) (*annBatch, error) {
	var err error
	var batch annBatch
	timestamp := testTimestamp

	batch.nodeAnn1, err = createNodeAnnouncement(key1, timestamp)
	if err != nil {
		return nil, err
	}

	batch.nodeAnn2, err = createNodeAnnouncement(key2, timestamp)
	if err != nil {
		return nil, err
	}

	batch.chanAnn, err = createChannelAnnouncement(blockHeight, key1, key2)
	if err != nil {
		return nil, err
	}

	batch.remoteProofAnn = &lnwire.AnnounceSignatures{
		ShortChannelID: lnwire.ShortChannelID{
			BlockHeight: blockHeight,
		},
		NodeSignature:   batch.chanAnn.NodeSig2,
		DecredSignature: batch.chanAnn.DecredSig2,
	}

	batch.localProofAnn = &lnwire.AnnounceSignatures{
		ShortChannelID: lnwire.ShortChannelID{
			BlockHeight: blockHeight,
		},
		NodeSignature:   batch.chanAnn.NodeSig1,
		DecredSignature: batch.chanAnn.DecredSig1,
	}

	batch.chanUpdAnn1, err = createUpdateAnnouncement(
		blockHeight, 0, key1, timestamp,
	)
	if err != nil {
		return nil, err
	}

	batch.chanUpdAnn2, err = createUpdateAnnouncement(
		blockHeight, 1, key2, timestamp,
	)
	if err != nil {
		return nil, err
	}

	return &batch, nil
}

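// createNodeAnnouncement builds and signs a node announcement for the given
// private key, deriving a deterministic alias from the key bytes.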
func createNodeAnnouncement(priv *secp256k1.PrivateKey,
	timestamp uint32, extraBytes ...[]byte) (*lnwire.NodeAnnouncement, error) {

	var err error
	k := hex.EncodeToString(priv.Serialize())
	alias, err := lnwire.NewNodeAlias("kek" + k[:10])
	if err != nil {
		return nil, err
	}

	a := &lnwire.NodeAnnouncement{
		Timestamp: timestamp,
		Addresses: testAddrs,
		Alias:     alias,
		Features:  testFeatures,
	}
	copy(a.NodeID[:], priv.PubKey().SerializeCompressed())
	if len(extraBytes) == 1 {
		a.ExtraOpaqueData = extraBytes[0]
	}

	signer := mock.SingleSigner{Privkey: priv}
	sig, err := netann.SignAnnouncement(&signer, testKeyLoc, a)
	if err != nil {
		return nil, err
	}

	a.Signature, err = lnwire.NewSigFromSignature(sig)
	if err != nil {
		return nil, err
	}

	return a, nil
}

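// createUpdateAnnouncement builds and signs a channel update for the given
// direction (flags), using randomized policy values.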
func createUpdateAnnouncement(blockHeight uint32,
	flags lnwire.ChanUpdateChanFlags,
	nodeKey *secp256k1.PrivateKey, timestamp uint32,
	extraBytes ...[]byte) (*lnwire.ChannelUpdate, error) {

	var err error

	htlcMinMatoms := lnwire.MilliAtom(prand.Int63())
	a := &lnwire.ChannelUpdate{
		ShortChannelID: lnwire.ShortChannelID{
			BlockHeight: blockHeight,
		},
		Timestamp:         timestamp,
		TimeLockDelta:     uint16(prand.Int63()),
		MessageFlags:      lnwire.ChanUpdateOptionMaxHtlc,
		ChannelFlags:      flags,
		HtlcMinimumMAtoms: htlcMinMatoms,

		// Since the max HTLC must be greater than the min HTLC to pass
		// channel update validation, set it to double the min HTLC.
		HtlcMaximumMAtoms: 2 * htlcMinMatoms,
		FeeRate:           uint32(prand.Int31()),
		BaseFee:           uint32(prand.Int31()),
	}
	if len(extraBytes) == 1 {
		a.ExtraOpaqueData = extraBytes[0]
	}

	err = signUpdate(nodeKey, a)
	if err != nil {
		return nil, err
	}

	return a, nil
}

func signUpdate(nodeKey *secp256k1.PrivateKey, a *lnwire.ChannelUpdate) error {
	signer := mock.SingleSigner{Privkey: nodeKey}
	sig, err := netann.SignAnnouncement(&signer, testKeyLoc, a)
	if err != nil {
		return err
	}

	a.Signature, err = lnwire.NewSigFromSignature(sig)
	if err != nil {
		return err
	}

	return nil
}

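// createAnnouncementWithoutProof builds a channel announcement for the given
// node keys without attaching any signatures.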
func createAnnouncementWithoutProof(blockHeight uint32,
	key1, key2 *secp256k1.PublicKey,
	extraBytes ...[]byte) *lnwire.ChannelAnnouncement {

	a := &lnwire.ChannelAnnouncement{
		ShortChannelID: lnwire.ShortChannelID{
			BlockHeight: blockHeight,
			TxIndex:     0,
			TxPosition:  0,
		},
		Features: testFeatures,
	}
	copy(a.NodeID1[:], key1.SerializeCompressed())
	copy(a.NodeID2[:], key2.SerializeCompressed())
	copy(a.DecredKey1[:], decredKeyPub1.SerializeCompressed())
	copy(a.DecredKey2[:], decredKeyPub2.SerializeCompressed())
	if len(extraBytes) == 1 {
		a.ExtraOpaqueData = extraBytes[0]
	}

	return a
}

func createRemoteChannelAnnouncement(blockHeight uint32,
	extraBytes ...[]byte) (*lnwire.ChannelAnnouncement, error) {

	return createChannelAnnouncement(blockHeight, remoteKeyPriv1, remoteKeyPriv2, extraBytes...)
}

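// createChannelAnnouncement builds a channel announcement and attaches all
// four required signatures: one per node key and one per decred key.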
func createChannelAnnouncement(blockHeight uint32, key1, key2 *secp256k1.PrivateKey,
	extraBytes ...[]byte) (*lnwire.ChannelAnnouncement, error) {

	a := createAnnouncementWithoutProof(blockHeight, key1.PubKey(), key2.PubKey(), extraBytes...)

	signer := mock.SingleSigner{Privkey: key1}
	sig, err := netann.SignAnnouncement(&signer, testKeyLoc, a)
	if err != nil {
		return nil, err
	}
	a.NodeSig1, err = lnwire.NewSigFromSignature(sig)
	if err != nil {
		return nil, err
	}

	signer = mock.SingleSigner{Privkey: key2}
	sig, err = netann.SignAnnouncement(&signer, testKeyLoc, a)
	if err != nil {
		return nil, err
	}
	a.NodeSig2, err = lnwire.NewSigFromSignature(sig)
	if err != nil {
		return nil, err
	}

	signer = mock.SingleSigner{Privkey: decredKeyPriv1}
	sig, err = netann.SignAnnouncement(&signer, testKeyLoc, a)
	if err != nil {
		return nil, err
	}
	a.DecredSig1, err = lnwire.NewSigFromSignature(sig)
	if err != nil {
		return nil, err
	}

	signer = mock.SingleSigner{Privkey: decredKeyPriv2}
	sig, err = netann.SignAnnouncement(&signer, testKeyLoc, a)
	if err != nil {
		return nil, err
	}
	a.DecredSig2, err = lnwire.NewSigFromSignature(sig)
	if err != nil {
		return nil, err
	}

	return a, nil
}

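// testCtx bundles a started gossiper with its mocked dependencies and a
// channel that captures every message the gossiper broadcasts.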
type testCtx struct {
	gossiper           *AuthenticatedGossiper
	router             *mockGraphSource
	notifier           *mockNotifier
	broadcastedMessage chan msgWithSenders
}

func createTestCtx(startHeight uint32) (*testCtx, func(), error) {
	// First, we'll initialize an instance of the channel router with mock
	// versions of the chain and channel notifier. As we don't need to
	// test any p2p functionality, the peer send, switch send, and
	// broadcast functions won't be populated.
	notifier := newMockNotifier()
	router := newMockRouter(startHeight)

	db, cleanUpDb, err := makeTestDB()
	if err != nil {
		return nil, nil, err
	}

	waitingProofStore, err := channeldb.NewWaitingProofStore(db)
	if err != nil {
		cleanUpDb()
		return nil, nil, err
	}

	broadcastedMessage := make(chan msgWithSenders, 10)
	gossiper := New(Config{
		Notifier: notifier,
		Broadcast: func(senders map[route.Vertex]struct{},
			msgs ...lnwire.Message) error {

			for _, msg := range msgs {
				broadcastedMessage <- msgWithSenders{
					msg:     msg,
					senders: senders,
				}
			}

			return nil
		},
		NotifyWhenOnline: func(target [33]byte,
			peerChan chan<- lnpeer.Peer) {

			pk, _ := secp256k1.ParsePubKey(target[:])
			peerChan <- &mockPeer{pk, nil, nil}
		},
		NotifyWhenOffline: func(_ [33]byte) <-chan struct{} {
			c := make(chan struct{})
			return c
		},
		SelfNodeAnnouncement: func(bool) (lnwire.NodeAnnouncement, error) {
			return lnwire.NodeAnnouncement{
				Timestamp: testTimestamp,
			}, nil
		},
		Router:                router,
		TrickleDelay:          trickleDelay,
		RetransmitTicker:      ticker.NewForce(retransmitDelay),
		RebroadcastInterval:   rebroadcastInterval,
		ProofMatureDelta:      proofMatureDelta,
		WaitingProofStore:     waitingProofStore,
		MessageStore:          newMockMessageStore(),
		GossiperState:         newMockGossipState(),
		RotateTicker:          ticker.NewForce(DefaultSyncerRotationInterval),
		HistoricalSyncTicker:  ticker.NewForce(DefaultHistoricalSyncInterval),
		NumActiveSyncers:      3,
		AnnSigner:             &mock.SingleSigner{Privkey: selfKeyPriv},
		SubBatchDelay:         time.Second * 5,
		MinimumBatchSize:      10,
		MaxChannelUpdateBurst: DefaultMaxChannelUpdateBurst,
		ChannelUpdateInterval: DefaultChannelUpdateInterval,
	}, selfKeyDesc)

	if err := gossiper.Start(); err != nil {
		cleanUpDb()
		return nil, nil, fmt.Errorf("unable to start gossiper: %v", err)
	}

	// Mark the graph as synced in order to allow the announcements to be
	// broadcast.
	gossiper.syncMgr.markGraphSynced()

	cleanUp := func() {
		gossiper.Stop()
		cleanUpDb()
	}

	return &testCtx{
		router:             router,
		notifier:           notifier,
		gossiper:           gossiper,
		broadcastedMessage: broadcastedMessage,
	}, cleanUp, nil
}

// TestProcessAnnouncement checks that mature announcements are propagated to
// the router subsystem.
func TestProcessAnnouncement(t *testing.T) {
	t.Parallel()

	timestamp := testTimestamp
	ctx, cleanup, err := createTestCtx(0)
	if err != nil {
		t.Fatalf("can't create context: %v", err)
	}
	defer cleanup()

	assertSenderExistence := func(sender *secp256k1.PublicKey, msg msgWithSenders) {
		if _, ok := msg.senders[route.NewVertex(sender)]; !ok {
			t.Fatalf("sender=%x not present in %v",
				sender.SerializeCompressed(), spew.Sdump(msg))
		}
	}

	nodePeer := &mockPeer{remoteKeyPriv1.PubKey(), nil, nil}

	// First, we'll craft a valid remote channel announcement and send it to
	// the gossiper so that it can be processed.
	ca, err := createRemoteChannelAnnouncement(0)
	if err != nil {
		t.Fatalf("can't create channel announcement: %v", err)
	}

	select {
	case err = <-ctx.gossiper.ProcessRemoteAnnouncement(ca, nodePeer):
	case <-time.After(2 * time.Second):
		t.Fatal("remote announcement not processed")
	}
	if err != nil {
		t.Fatalf("can't process remote announcement: %v", err)
	}

	// The announcement should be broadcast and included in our local view
	// of the graph.
	select {
	case msg := <-ctx.broadcastedMessage:
		assertSenderExistence(nodePeer.IdentityKey(), msg)
	case <-time.After(2 * trickleDelay):
		t.Fatal("announcement wasn't broadcast")
	}

	if len(ctx.router.infos) != 1 {
		t.Fatal("edge wasn't added to router")
	}

	// We'll then craft the channel policy of the remote party and also send
	// it to the gossiper.
	ua, err := createUpdateAnnouncement(0, 0, remoteKeyPriv1, timestamp)
	if err != nil {
		t.Fatalf("can't create update announcement: %v", err)
	}

	select {
	case err = <-ctx.gossiper.ProcessRemoteAnnouncement(ua, nodePeer):
	case <-time.After(2 * time.Second):
		t.Fatal("remote announcement not processed")
	}
	if err != nil {
		t.Fatalf("can't process remote announcement: %v", err)
	}

	// The channel policy should be broadcast to the rest of the network.
	select {
	case msg := <-ctx.broadcastedMessage:
		assertSenderExistence(nodePeer.IdentityKey(), msg)
	case <-time.After(2 * trickleDelay):
		t.Fatal("announcement wasn't broadcast")
	}

	if len(ctx.router.edges) != 1 {
		t.Fatal("edge update wasn't added to router")
	}

	// Finally, we'll craft the remote party's node announcement.
	na, err := createNodeAnnouncement(remoteKeyPriv1, timestamp)
	if err != nil {
		t.Fatalf("can't create node announcement: %v", err)
	}

	select {
	case err = <-ctx.gossiper.ProcessRemoteAnnouncement(na, nodePeer):
	case <-time.After(2 * time.Second):
		t.Fatal("remote announcement not processed")
	}
	if err != nil {
		t.Fatalf("can't process remote announcement: %v", err)
	}

	// It should also be broadcast to the network and included in our local
	// view of the graph.
	select {
	case msg := <-ctx.broadcastedMessage:
		assertSenderExistence(nodePeer.IdentityKey(), msg)
	case <-time.After(2 * trickleDelay):
		t.Fatal("announcement wasn't broadcast")
	}

	if len(ctx.router.nodes) != 1 {
		t.Fatal("node wasn't added to router")
	}
}

// TestPrematureAnnouncement checks that premature announcements are not
// propagated to the router subsystem.
func TestPrematureAnnouncement(t *testing.T) {
	t.Parallel()

	timestamp := testTimestamp

	ctx, cleanup, err := createTestCtx(0)
	if err != nil {
		t.Fatalf("can't create context: %v", err)
	}
	defer cleanup()

	_, err = createNodeAnnouncement(remoteKeyPriv1, timestamp)
	if err != nil {
		t.Fatalf("can't create node announcement: %v", err)
	}

	nodePeer := &mockPeer{remoteKeyPriv1.PubKey(), nil, nil}

	// Pretend that we receive a valid channel announcement from the
	// remote side whose block height is greater than the highest known to
	// us. It should therefore be ignored and not added to the router.
	ca, err := createRemoteChannelAnnouncement(1)
	if err != nil {
		t.Fatalf("can't create channel announcement: %v", err)
	}

	select {
	case <-ctx.gossiper.ProcessRemoteAnnouncement(ca, nodePeer):
	case <-time.After(time.Second):
		t.Fatal("announcement was not processed")
	}

	if len(ctx.router.infos) != 0 {
		t.Fatal("edge was added to router")
	}
}

// TestSignatureAnnouncementLocalFirst ensures that the AuthenticatedGossiper
// properly processes partial and full announcement signature messages.
func TestSignatureAnnouncementLocalFirst(t *testing.T) {
	t.Parallel()

	ctx, cleanup, err := createTestCtx(proofMatureDelta)
	if err != nil {
		t.Fatalf("can't create context: %v", err)
	}
	defer cleanup()

	// Set up a channel that we can use to inspect the messages sent
	// directly from the gossiper.
	sentMsgs := make(chan lnwire.Message, 10)
	ctx.gossiper.reliableSender.cfg.NotifyWhenOnline = func(target [33]byte,
		peerChan chan<- lnpeer.Peer) {

		pk, _ := secp256k1.ParsePubKey(target[:])

		select {
		case peerChan <- &mockPeer{pk, sentMsgs, ctx.gossiper.quit}:
		case <-ctx.gossiper.quit:
		}
	}

	batch, err := createLocalAnnouncements(0)
	if err != nil {
		t.Fatalf("can't generate announcements: %v", err)
	}

	remoteKey, err := secp256k1.ParsePubKey(batch.nodeAnn2.NodeID[:])
	if err != nil {
		t.Fatalf("unable to parse pubkey: %v", err)
	}
	remotePeer := &mockPeer{remoteKey, sentMsgs, ctx.gossiper.quit}

	// Recreate lightning network topology. Initialize router with channel
	// between two nodes.
	select {
	case err = <-ctx.gossiper.ProcessLocalAnnouncement(batch.chanAnn):
	case <-time.After(2 * time.Second):
		t.Fatal("did not process local announcement")
	}
	if err != nil {
		t.Fatalf("unable to process channel ann: %v", err)
	}
	select {
	case <-ctx.broadcastedMessage:
		t.Fatal("channel announcement was broadcast")
	case <-time.After(2 * trickleDelay):
	}

	select {
	case err = <-ctx.gossiper.ProcessLocalAnnouncement(batch.chanUpdAnn1):
	case <-time.After(2 * time.Second):
		t.Fatal("did not process local announcement")
	}
	if err != nil {
		t.Fatalf("unable to process channel update: %v", err)
	}
	select {
	case <-ctx.broadcastedMessage:
		t.Fatal("channel update announcement was broadcast")
	case <-time.After(2 * trickleDelay):
	}

	select {
	case err = <-ctx.gossiper.ProcessLocalAnnouncement(batch.nodeAnn1):
	case <-time.After(2 * time.Second):
		t.Fatal("did not process local announcement")
	}
	if err != nil {
		t.Fatalf("unable to process node ann: %v", err)
	}
	select {
	case <-ctx.broadcastedMessage:
		t.Fatal("node announcement was broadcast")
	case <-time.After(2 * trickleDelay):
	}

	// The local ChannelUpdate should now be sent directly to the remote
	// peer, such that the edge can be used for routing, regardless of
	// whether this channel is announced or not (private channel).
	select {
	case msg := <-sentMsgs:
		assertMessage(t, batch.chanUpdAnn1, msg)
	case <-time.After(1 * time.Second):
		t.Fatal("gossiper did not send channel update to peer")
	}

	select {
	case err = <-ctx.gossiper.ProcessRemoteAnnouncement(
		batch.chanUpdAnn2, remotePeer,
	):
	case <-time.After(2 * time.Second):
		t.Fatal("did not process remote announcement")
	}
	if err != nil {
		t.Fatalf("unable to process channel update: %v", err)
	}
	select {
	case <-ctx.broadcastedMessage:
		t.Fatal("channel update announcement was broadcast")
	case <-time.After(2 * trickleDelay):
	}

	select {
	case err = <-ctx.gossiper.ProcessRemoteAnnouncement(
		batch.nodeAnn2, remotePeer,
	):
	case <-time.After(2 * time.Second):
		t.Fatal("did not process remote announcement")
	}
	if err != nil {
		t.Fatalf("unable to process node ann: %v", err)
	}
	select {
	case <-ctx.broadcastedMessage:
		t.Fatal("node announcement was broadcast")
	case <-time.After(2 * trickleDelay):
	}

	// Pretend that we receive the local channel announcement from the
	// funding manager, thereby kicking off the announcement exchange
	// process.
	select {
	case err = <-ctx.gossiper.ProcessLocalAnnouncement(batch.localProofAnn):
	case <-time.After(2 * time.Second):
		t.Fatal("did not process local announcement")
	}
	if err != nil {
		t.Fatalf("unable to process local proof: %v", err)
	}

	select {
	case <-ctx.broadcastedMessage:
		t.Fatal("announcements were broadcast")
	case <-time.After(2 * trickleDelay):
	}

	number := 0
	if err := ctx.gossiper.cfg.WaitingProofStore.ForAll(
		func(*channeldb.WaitingProof) error {
			number++
			return nil
		},
		func() {
			number = 0
		},
	); err != nil {
		t.Fatalf("unable to retrieve objects from store: %v", err)
	}

	if number != 1 {
		t.Fatal("wrong number of objects in storage")
	}

	select {
	case err = <-ctx.gossiper.ProcessRemoteAnnouncement(
		batch.remoteProofAnn, remotePeer,
	):
	case <-time.After(2 * time.Second):
		t.Fatal("did not process remote announcement")
	}
	if err != nil {
		t.Fatalf("unable to process remote proof: %v", err)
	}

	for i := 0; i < 5; i++ {
		select {
		case <-ctx.broadcastedMessage:
		case <-time.After(time.Second):
			t.Fatal("announcement wasn't broadcast")
		}
	}

	number = 0
	if err := ctx.gossiper.cfg.WaitingProofStore.ForAll(
		func(*channeldb.WaitingProof) error {
			number++
			return nil
		},
		func() {
			number = 0
		},
	); err != nil && err != channeldb.ErrWaitingProofNotFound {
		t.Fatalf("unable to retrieve objects from store: %v", err)
	}

	if number != 0 {
		t.Fatal("waiting proof should be removed from storage")
	}
}

// TestOrphanSignatureAnnouncement ensures that the gossiper properly
// processes announcements with unknown channel IDs.
func TestOrphanSignatureAnnouncement(t *testing.T) {
	t.Parallel()

	ctx, cleanup, err := createTestCtx(proofMatureDelta)
	if err != nil {
		t.Fatalf("can't create context: %v", err)
	}
	defer cleanup()

	// Set up a channel that we can use to inspect the messages sent
	// directly from the gossiper.
	sentMsgs := make(chan lnwire.Message, 10)
	ctx.gossiper.reliableSender.cfg.NotifyWhenOnline = func(target [33]byte,
		peerChan chan<- lnpeer.Peer) {

		pk, _ := secp256k1.ParsePubKey(target[:])

		select {
		case peerChan <- &mockPeer{pk, sentMsgs, ctx.gossiper.quit}:
		case <-ctx.gossiper.quit:
		}
	}

	batch, err := createLocalAnnouncements(0)
	if err != nil {
		t.Fatalf("can't generate announcements: %v", err)
	}

	remoteKey, err := secp256k1.ParsePubKey(batch.nodeAnn2.NodeID[:])
	if err != nil {
		t.Fatalf("unable to parse pubkey: %v", err)
	}
	remotePeer := &mockPeer{remoteKey, sentMsgs, ctx.gossiper.quit}

	// Pretend that we receive the remote proof announcement before the
	// channel itself has been announced. The announcement should be added
	// to the orphan batch because we haven't announced the channel yet.
	select {
	case err = <-ctx.gossiper.ProcessRemoteAnnouncement(batch.remoteProofAnn,
		remotePeer):
	case <-time.After(2 * time.Second):
		t.Fatal("did not process remote announcement")
	}
	if err != nil {
		t.Fatalf("unable to process announcement: %v", err)
	}

	number := 0
	if err := ctx.gossiper.cfg.WaitingProofStore.ForAll(
		func(*channeldb.WaitingProof) error {
			number++
			return nil
		},
		func() {
			number = 0
		},
	); err != nil {
		t.Fatalf("unable to retrieve objects from store: %v", err)
	}

	if number != 1 {
		t.Fatal("wrong number of objects in storage")
	}

	// Recreate lightning network topology. Initialize router with channel
	// between two nodes.
	select {
	case err = <-ctx.gossiper.ProcessLocalAnnouncement(batch.chanAnn):
	case <-time.After(2 * time.Second):
		t.Fatal("did not process local announcement")
	}

	if err != nil {
		t.Fatalf("unable to process: %v", err)
	}

	select {
	case <-ctx.broadcastedMessage:
		t.Fatal("channel announcement was broadcast")
	case <-time.After(2 * trickleDelay):
	}

	select {
	case err = <-ctx.gossiper.ProcessLocalAnnouncement(batch.chanUpdAnn1):
	case <-time.After(2 * time.Second):
		t.Fatal("did not process local announcement")
	}
	if err != nil {
		t.Fatalf("unable to process: %v", err)
	}

	select {
	case <-ctx.broadcastedMessage:
		t.Fatal("channel update announcement was broadcast")
	case <-time.After(2 * trickleDelay):
	}

	select {
	case err = <-ctx.gossiper.ProcessLocalAnnouncement(batch.nodeAnn1):
	case <-time.After(2 * time.Second):
		t.Fatal("did not process local announcement")
	}
	if err != nil {
		t.Fatalf("unable to process node ann: %v", err)
	}
	select {
	case <-ctx.broadcastedMessage:
		t.Fatal("node announcement was broadcast")
	case <-time.After(2 * trickleDelay):
	}

	// The local ChannelUpdate should now be sent directly to the remote
	// peer, such that the edge can be used for routing, regardless of
	// whether this channel is announced or not (private channel).
	select {
	case msg := <-sentMsgs:
		assertMessage(t, batch.chanUpdAnn1, msg)
	case <-time.After(1 * time.Second):
		t.Fatal("gossiper did not send channel update to peer")
	}

	select {
	case err = <-ctx.gossiper.ProcessRemoteAnnouncement(batch.chanUpdAnn2,
		remotePeer):
	case <-time.After(2 * time.Second):
		t.Fatal("did not process remote announcement")
	}
	if err != nil {
		t.Fatalf("unable to process channel update: %v", err)
	}
	select {
	case <-ctx.broadcastedMessage:
		t.Fatal("channel update announcement was broadcast")
	case <-time.After(2 * trickleDelay):
	}

	select {
	case err = <-ctx.gossiper.ProcessRemoteAnnouncement(
		batch.nodeAnn2, remotePeer,
	):
	case <-time.After(2 * time.Second):
		t.Fatal("did not process remote announcement")
	}
	if err != nil {
		t.Fatalf("unable to process: %v", err)
	}
	select {
	case <-ctx.broadcastedMessage:
		t.Fatal("node announcement was broadcast")
	case <-time.After(2 * trickleDelay):
	}

	// After that we process the local proof, kicking off the exchange,
	// and wait for the channel announcement to be delivered.
	select {
	case err = <-ctx.gossiper.ProcessLocalAnnouncement(batch.localProofAnn):
	case <-time.After(2 * time.Second):
		t.Fatal("did not process local announcement")
	}
	if err != nil {
		t.Fatalf("unable to process: %v", err)
	}

	// The local proof should be sent to the remote peer.
	select {
	case msg := <-sentMsgs:
		assertMessage(t, batch.localProofAnn, msg)
	case <-time.After(2 * time.Second):
		t.Fatalf("local proof was not sent to peer")
	}

	// And since both remote and local announcements are processed, we
	// should be broadcasting the final channel announcements.
	for i := 0; i < 5; i++ {
		select {
		case <-ctx.broadcastedMessage:
		case <-time.After(time.Second):
			t.Fatal("announcement wasn't broadcast")
		}
	}

	number = 0
	if err := ctx.gossiper.cfg.WaitingProofStore.ForAll(
		func(p *channeldb.WaitingProof) error {
			number++
			return nil
		},
		func() {
			number = 0
		},
	); err != nil {
		t.Fatalf("unable to retrieve objects from store: %v", err)
	}

	if number != 0 {
		t.Fatalf("wrong number of objects in storage: %v", number)
	}
}

// TestSignatureAnnouncementRetryAtStartup tests that if we restart the
// gossiper, it will retry sending the AnnounceSignatures to the peer if it did
// not succeed before shutting down, and the full channel proof is not yet
// assembled.
func TestSignatureAnnouncementRetryAtStartup(t *testing.T) {
	t.Parallel()

	ctx, cleanup, err := createTestCtx(proofMatureDelta)
	if err != nil {
		t.Fatalf("can't create context: %v", err)
	}
	defer cleanup()

	batch, err := createLocalAnnouncements(0)
	if err != nil {
		t.Fatalf("can't generate announcements: %v", err)
	}

	remoteKey, err := secp256k1.ParsePubKey(batch.nodeAnn2.NodeID[:])
	if err != nil {
		t.Fatalf("unable to parse pubkey: %v", err)
	}

	// Set up a channel to intercept the messages sent to the remote peer.
	sentToPeer := make(chan lnwire.Message, 1)
	remotePeer := &mockPeer{remoteKey, sentToPeer, ctx.gossiper.quit}

	// Since the reliable send to the remote peer of the local channel
	// proof requires a notification when the peer comes online, we'll
	// capture the channel through which it gets sent to control exactly
	// when to dispatch it.
	notifyPeers := make(chan chan<- lnpeer.Peer, 1)
	ctx.gossiper.reliableSender.cfg.NotifyWhenOnline = func(peer [33]byte,
		connectedChan chan<- lnpeer.Peer) {
		notifyPeers <- connectedChan
	}

	// Recreate lightning network topology. Initialize router with channel
	// between two nodes.
	select {
	case err = <-ctx.gossiper.ProcessLocalAnnouncement(batch.chanAnn):
	case <-time.After(2 * time.Second):
		t.Fatal("did not process local announcement")
	}
	if err != nil {
		t.Fatalf("unable to process channel ann: %v", err)
	}
	select {
	case <-ctx.broadcastedMessage:
		t.Fatal("channel announcement was broadcast")
	case <-time.After(2 * trickleDelay):
	}

	// Pretend that we receive the local channel announcement from the
	// funding manager, thereby kicking off the announcement exchange
	// process.
	select {
	case err = <-ctx.gossiper.ProcessLocalAnnouncement(
		batch.localProofAnn,
	):
	case <-time.After(2 * time.Second):
		t.Fatal("did not process local announcement")
	}
	if err != nil {
		t.Fatalf("unable to process: %v", err)
	}

	// The gossiper should register for a notification for when the peer is
	// online.
	select {
	case <-notifyPeers:
	case <-time.After(2 * time.Second):
		t.Fatalf("gossiper did not ask to get notified when " +
			"peer is online")
	}

	// The proof should not be broadcast yet since we're still missing the
	// remote party's.
	select {
	case <-ctx.broadcastedMessage:
		t.Fatal("announcements were broadcast")
	case <-time.After(2 * trickleDelay):
	}

	// And it shouldn't be sent to the peer either as they are offline.
	select {
	case msg := <-sentToPeer:
		t.Fatalf("received unexpected message: %v", spew.Sdump(msg))
	case <-time.After(time.Second):
	}

	number := 0
	if err := ctx.gossiper.cfg.WaitingProofStore.ForAll(
		func(*channeldb.WaitingProof) error {
			number++
			return nil
		},
		func() {
			number = 0
		},
	); err != nil {
		t.Fatalf("unable to retrieve objects from store: %v", err)
	}

	if number != 1 {
		t.Fatal("wrong number of objects in storage")
	}

	// Restart the gossiper and restore its original NotifyWhenOnline and
	// NotifyWhenOffline methods. This should trigger a new attempt to send
	// the message to the peer.
	ctx.gossiper.Stop()
	gossiper := New(Config{
		Notifier:             ctx.gossiper.cfg.Notifier,
		Broadcast:            ctx.gossiper.cfg.Broadcast,
		NotifyWhenOnline:     ctx.gossiper.reliableSender.cfg.NotifyWhenOnline,
		NotifyWhenOffline:    ctx.gossiper.reliableSender.cfg.NotifyWhenOffline,
		SelfNodeAnnouncement: ctx.gossiper.cfg.SelfNodeAnnouncement,
		Router:               ctx.gossiper.cfg.Router,
		TrickleDelay:         trickleDelay,
		RetransmitTicker:     ticker.NewForce(retransmitDelay),
		RebroadcastInterval:  rebroadcastInterval,
		ProofMatureDelta:     proofMatureDelta,
		WaitingProofStore:    ctx.gossiper.cfg.WaitingProofStore,
		MessageStore:         ctx.gossiper.cfg.MessageStore,
		RotateTicker:         ticker.NewForce(DefaultSyncerRotationInterval),
		HistoricalSyncTicker: ticker.NewForce(DefaultHistoricalSyncInterval),
		NumActiveSyncers:     3,
		MinimumBatchSize:     10,
		SubBatchDelay:        time.Second * 5,
	}, &keychain.KeyDescriptor{
		PubKey:     ctx.gossiper.selfKey,
		KeyLocator: ctx.gossiper.selfKeyLoc,
	})
	if err := gossiper.Start(); err != nil {
		t.Fatalf("unable to start recreated gossiper: %v", err)
	}
	defer gossiper.Stop()

	// Mark the graph as synced in order to allow the announcements to be
	// broadcast.
	gossiper.syncMgr.markGraphSynced()

	ctx.gossiper = gossiper
	remotePeer.quit = ctx.gossiper.quit

	// After starting up, the gossiper will see that it has a proof in the
	// WaitingProofStore, and will retry sending its part to the remote.
	// It should register for a notification for when the peer is online.
	var peerChan chan<- lnpeer.Peer
	select {
	case peerChan = <-notifyPeers:
	case <-time.After(2 * time.Second):
		t.Fatalf("gossiper did not ask to get notified when " +
			"peer is online")
	}

	// Notify that peer is now online. This should allow the proof to be
	// sent.
	peerChan <- remotePeer

out:
	for {
		select {
		case msg := <-sentToPeer:
			// Since the ChannelUpdate will also be resent as it is
			// sent reliably, we'll need to filter it out.
			if _, ok := msg.(*lnwire.AnnounceSignatures); !ok {
				continue
			}

			assertMessage(t, batch.localProofAnn, msg)
			break out
		case <-time.After(2 * time.Second):
			t.Fatalf("gossiper did not send message when peer " +
				"came online")
		}
	}

	// Now exchanging the remote channel proof, the channel announcement
	// broadcast should continue as normal.
	select {
	case err = <-ctx.gossiper.ProcessRemoteAnnouncement(
		batch.remoteProofAnn, remotePeer,
	):
	case <-time.After(2 * time.Second):
		t.Fatal("did not process remote announcement")
	}
	if err != nil {
		t.Fatalf("unable to process: %v", err)
	}

	select {
	case <-ctx.broadcastedMessage:
	case <-time.After(time.Second):
		t.Fatal("announcement wasn't broadcast")
	}

	number = 0
	if err := ctx.gossiper.cfg.WaitingProofStore.ForAll(
		func(*channeldb.WaitingProof) error {
			number++
			return nil
		},
		func() {
			number = 0
		},
	); err != nil && err != channeldb.ErrWaitingProofNotFound {
		t.Fatalf("unable to retrieve objects from store: %v", err)
	}

	if number != 0 {
		t.Fatal("waiting proof should be removed from storage")
	}
}

  1555  // TestSignatureAnnouncementFullProofWhenRemoteProof tests that if a remote
  1556  // proof is received when we already have the full proof, the gossiper will send
  1557  // the full proof (ChannelAnnouncement) to the remote peer.
  1558  func TestSignatureAnnouncementFullProofWhenRemoteProof(t *testing.T) {
  1559  	t.Parallel()
  1560  
  1561  	ctx, cleanup, err := createTestCtx(proofMatureDelta)
  1562  	if err != nil {
  1563  		t.Fatalf("can't create context: %v", err)
  1564  	}
  1565  	defer cleanup()
  1566  
  1567  	batch, err := createLocalAnnouncements(0)
  1568  	if err != nil {
  1569  		t.Fatalf("can't generate announcements: %v", err)
  1570  	}
  1571  
  1572  	remoteKey, err := secp256k1.ParsePubKey(batch.nodeAnn2.NodeID[:])
  1573  	if err != nil {
  1574  		t.Fatalf("unable to parse pubkey: %v", err)
  1575  	}
  1576  
  1577  	// Set up a channel we can use to inspect messages sent by the
  1578  	// gossiper to the remote peer.
  1579  	sentToPeer := make(chan lnwire.Message, 1)
  1580  	remotePeer := &mockPeer{remoteKey, sentToPeer, ctx.gossiper.quit}
  1581  
  1582  	// Override NotifyWhenOnline to return the remote peer which we expect
  1583  	// meesages to be sent to.
  1584  	ctx.gossiper.reliableSender.cfg.NotifyWhenOnline = func(peer [33]byte,
  1585  		peerChan chan<- lnpeer.Peer) {
  1586  
  1587  		peerChan <- remotePeer
  1588  	}
  1589  
  1590  	// Recreate lightning network topology. Initialize router with channel
  1591  	// between two nodes.
  1592  	select {
  1593  	case err = <-ctx.gossiper.ProcessLocalAnnouncement(
  1594  		batch.chanAnn,
  1595  	):
  1596  	case <-time.After(2 * time.Second):
  1597  		t.Fatal("did not process local announcement")
  1598  	}
  1599  	if err != nil {
  1600  		t.Fatalf("unable to process channel ann: %v", err)
  1601  	}
  1602  	select {
  1603  	case <-ctx.broadcastedMessage:
  1604  		t.Fatal("channel announcement was broadcast")
  1605  	case <-time.After(2 * trickleDelay):
  1606  	}
  1607  
  1608  	select {
  1609  	case err = <-ctx.gossiper.ProcessLocalAnnouncement(
  1610  		batch.chanUpdAnn1,
  1611  	):
  1612  	case <-time.After(2 * time.Second):
  1613  		t.Fatal("did not process local announcement")
  1614  	}
  1615  	if err != nil {
  1616  		t.Fatalf("unable to process channel update: %v", err)
  1617  	}
  1618  	select {
  1619  	case <-ctx.broadcastedMessage:
  1620  		t.Fatal("channel update announcement was broadcast")
  1621  	case <-time.After(2 * trickleDelay):
  1622  	}
  1623  
  1624  	select {
  1625  	case msg := <-sentToPeer:
  1626  		assertMessage(t, batch.chanUpdAnn1, msg)
  1627  	case <-time.After(2 * time.Second):
  1628  		t.Fatal("gossiper did not send channel update to remove peer")
  1629  	}
  1630  
  1631  	select {
  1632  	case err = <-ctx.gossiper.ProcessLocalAnnouncement(
  1633  		batch.nodeAnn1,
  1634  	):
  1635  	case <-time.After(2 * time.Second):
  1636  		t.Fatal("did not process local announcement")
  1637  	}
  1638  	if err != nil {
  1639  		t.Fatalf("unable to process node ann: %v", err)
  1640  	}
  1641  	select {
  1642  	case <-ctx.broadcastedMessage:
  1643  		t.Fatal("node announcement was broadcast")
  1644  	case <-time.After(2 * trickleDelay):
  1645  	}
  1646  
  1647  	select {
  1648  	case err = <-ctx.gossiper.ProcessRemoteAnnouncement(
  1649  		batch.chanUpdAnn2, remotePeer,
  1650  	):
  1651  	case <-time.After(2 * time.Second):
  1652  		t.Fatal("did not process remote announcement")
  1653  	}
  1654  	if err != nil {
  1655  		t.Fatalf("unable to process channel update: %v", err)
  1656  	}
  1657  	select {
  1658  	case <-ctx.broadcastedMessage:
  1659  		t.Fatal("channel update announcement was broadcast")
  1660  	case <-time.After(2 * trickleDelay):
  1661  	}
  1662  
  1663  	select {
  1664  	case err = <-ctx.gossiper.ProcessRemoteAnnouncement(
  1665  		batch.nodeAnn2, remotePeer,
  1666  	):
  1667  	case <-time.After(2 * time.Second):
  1668  		t.Fatal("did not process remote announcement")
  1669  	}
  1670  	if err != nil {
  1671  		t.Fatalf("unable to process node ann: %v", err)
  1672  	}
  1673  	select {
  1674  	case <-ctx.broadcastedMessage:
  1675  		t.Fatal("node announcement was broadcast")
  1676  	case <-time.After(2 * trickleDelay):
  1677  	}
  1678  
  1679  	// Pretend that we receive the local channel announcement from the
  1680  	// funding manager, thereby kicking off the announcement exchange process.
  1681  	select {
  1682  	case err = <-ctx.gossiper.ProcessLocalAnnouncement(
  1683  		batch.localProofAnn,
  1684  	):
  1685  	case <-time.After(2 * time.Second):
  1686  		t.Fatal("did not process local announcement")
  1687  	}
  1688  	if err != nil {
  1689  		t.Fatalf("unable to process local proof: %v", err)
  1690  	}
  1691  
  1692  	select {
  1693  	case err = <-ctx.gossiper.ProcessRemoteAnnouncement(
  1694  		batch.remoteProofAnn, remotePeer,
  1695  	):
  1696  	case <-time.After(2 * time.Second):
  1697  		t.Fatal("did not process remote announcement")
  1698  	}
  1699  	if err != nil {
  1700  		t.Fatalf("unable to process remote proof: %v", err)
  1701  	}
  1702  
  1703  	// We expect the gossiper to send this message to the remote peer.
  1704  	select {
  1705  	case msg := <-sentToPeer:
  1706  		assertMessage(t, batch.localProofAnn, msg)
  1707  	case <-time.After(2 * time.Second):
  1708  		t.Fatal("did not send local proof to peer")
  1709  	}
  1710  
  1711  	// All channel and node announcements should be broadcast.
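        	// The five messages are presumably the ChannelAnnouncement, both
        	// ChannelUpdates, and both NodeAnnouncements processed above.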
  1712  	for i := 0; i < 5; i++ {
  1713  		select {
  1714  		case <-ctx.broadcastedMessage:
  1715  		case <-time.After(time.Second):
  1716  			t.Fatal("announcement wasn't broadcast")
  1717  		}
  1718  	}
  1719  
  1720  	number := 0
  1721  	if err := ctx.gossiper.cfg.WaitingProofStore.ForAll(
  1722  		func(*channeldb.WaitingProof) error {
  1723  			number++
  1724  			return nil
  1725  		},
  1726  		func() {
  1727  			number = 0
  1728  		},
  1729  	); err != nil && err != channeldb.ErrWaitingProofNotFound {
  1730  		t.Fatalf("unable to retrieve objects from store: %v", err)
  1731  	}
  1732  
  1733  	if number != 0 {
  1734  		t.Fatal("waiting proof should be removed from storage")
  1735  	}
  1736  
  1737  	// Now give the gossiper the remote proof yet again. This should
  1738  	// trigger a send of the full ChannelAnnouncement.
  1739  	select {
  1740  	case err = <-ctx.gossiper.ProcessRemoteAnnouncement(
  1741  		batch.remoteProofAnn, remotePeer,
  1742  	):
  1743  	case <-time.After(2 * time.Second):
  1744  		t.Fatal("did not process remote announcement")
  1745  	}
  1746  	if err != nil {
  1747  		t.Fatalf("unable to process remote proof: %v", err)
  1748  	}
  1749  
  1750  	// We expect the gossiper to send this message to the remote peer.
  1751  	select {
  1752  	case msg := <-sentToPeer:
  1753  		_, ok := msg.(*lnwire.ChannelAnnouncement)
  1754  		if !ok {
  1755  			t.Fatalf("expected ChannelAnnouncement, instead got %T", msg)
  1756  		}
  1757  	case <-time.After(2 * time.Second):
  1758  		t.Fatal("did not send full channel announcement to peer")
  1759  	}
  1760  }
  1761  
  1762  // TestDeDuplicatedAnnouncements ensures that the deDupedAnnouncements struct
  1763  // properly stores and delivers the set of de-duplicated announcements.
  1764  func TestDeDuplicatedAnnouncements(t *testing.T) {
  1765  	t.Parallel()
  1766  
  1767  	timestamp := testTimestamp
  1768  	announcements := deDupedAnnouncements{}
  1769  	announcements.Reset()
  1770  
  1771  	// Ensure that after a new deDupedAnnouncements struct is created and
  1772  	// reset, the storage for each announcement type is empty.
  1773  	if len(announcements.channelAnnouncements) != 0 {
  1774  		t.Fatal("channel announcements map not empty after reset")
  1775  	}
  1776  	if len(announcements.channelUpdates) != 0 {
  1777  		t.Fatal("channel updates map not empty after reset")
  1778  	}
  1779  	if len(announcements.nodeAnnouncements) != 0 {
  1780  		t.Fatal("node announcements map not empty after reset")
  1781  	}
  1782  
  1783  	// Ensure that remote channel announcements are properly stored
  1784  	// and de-duplicated.
  1785  	ca, err := createRemoteChannelAnnouncement(0)
  1786  	if err != nil {
  1787  		t.Fatalf("can't create remote channel announcement: %v", err)
  1788  	}
  1789  
  1790  	nodePeer := &mockPeer{decredKeyPub2, nil, nil}
  1791  	announcements.AddMsgs(networkMsg{
  1792  		msg:    ca,
  1793  		peer:   nodePeer,
  1794  		source: nodePeer.IdentityKey(),
  1795  	})
  1796  	if len(announcements.channelAnnouncements) != 1 {
  1797  		t.Fatal("new channel announcement not stored in batch")
  1798  	}
  1799  
  1800  	// We'll create a second instance of the same announcement with the
  1801  	// same channel ID. Adding this shouldn't cause an increase in the
  1802  	// number of items as they should be de-duplicated.
  1803  	ca2, err := createRemoteChannelAnnouncement(0)
  1804  	if err != nil {
  1805  		t.Fatalf("can't create remote channel announcement: %v", err)
  1806  	}
  1807  	announcements.AddMsgs(networkMsg{
  1808  		msg:    ca2,
  1809  		peer:   nodePeer,
  1810  		source: nodePeer.IdentityKey(),
  1811  	})
  1812  	if len(announcements.channelAnnouncements) != 1 {
  1813  		t.Fatal("channel announcement not replaced in batch")
  1814  	}
  1815  
  1816  	// Next, we'll ensure that channel update announcements are properly
  1817  	// stored and de-duplicated. We do this by creating two update
  1818  	// announcements with the same short ID and flag.
  1819  	ua, err := createUpdateAnnouncement(0, 0, remoteKeyPriv1, timestamp)
  1820  	if err != nil {
  1821  		t.Fatalf("can't create update announcement: %v", err)
  1822  	}
  1823  	announcements.AddMsgs(networkMsg{
  1824  		msg:    ua,
  1825  		peer:   nodePeer,
  1826  		source: nodePeer.IdentityKey(),
  1827  	})
  1828  	if len(announcements.channelUpdates) != 1 {
  1829  		t.Fatal("new channel update not stored in batch")
  1830  	}
  1831  
  1832  	// Adding the very same announcement shouldn't cause an increase in the
  1833  	// number of ChannelUpdate announcements stored.
  1834  	ua2, err := createUpdateAnnouncement(0, 0, remoteKeyPriv1, timestamp)
  1835  	if err != nil {
  1836  		t.Fatalf("can't create update announcement: %v", err)
  1837  	}
  1838  	announcements.AddMsgs(networkMsg{
  1839  		msg:    ua2,
  1840  		peer:   nodePeer,
  1841  		source: nodePeer.IdentityKey(),
  1842  	})
  1843  	if len(announcements.channelUpdates) != 1 {
  1844  		t.Fatal("channel update not replaced in batch")
  1845  	}
  1846  
  1847  	// Adding an announcement with a later timestamp should replace the
  1848  	// stored one.
  1849  	ua3, err := createUpdateAnnouncement(0, 0, remoteKeyPriv1, timestamp+1)
  1850  	if err != nil {
  1851  		t.Fatalf("can't create update announcement: %v", err)
  1852  	}
  1853  	announcements.AddMsgs(networkMsg{
  1854  		msg:    ua3,
  1855  		peer:   nodePeer,
  1856  		source: nodePeer.IdentityKey(),
  1857  	})
  1858  	if len(announcements.channelUpdates) != 1 {
  1859  		t.Fatal("channel update not replaced in batch")
  1860  	}
  1861  
  1862  	assertChannelUpdate := func(channelUpdate *lnwire.ChannelUpdate) {
  1863  		channelKey := channelUpdateID{
  1864  			ua3.ShortChannelID,
  1865  			ua3.ChannelFlags,
  1866  		}
  1867  
  1868  		mws, ok := announcements.channelUpdates[channelKey]
  1869  		if !ok {
  1870  			t.Fatal("channel update not in batch")
  1871  		}
  1872  		if mws.msg != channelUpdate {
  1873  			t.Fatalf("expected channel update %v, got %v",
  1874  				channelUpdate, mws.msg)
  1875  		}
  1876  	}
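
        	// Note that channel updates are keyed on (ShortChannelID,
        	// ChannelFlags), so each direction of a channel is de-duplicated
        	// independently and a newer update simply overwrites the stored
        	// entry for its direction.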
  1877  
  1878  	// Check that ua3 is the currently stored channel update.
  1879  	assertChannelUpdate(ua3)
  1880  
  1881  	// Adding a channel update with an earlier timestamp should NOT
  1882  	// replace the one stored.
  1883  	ua4, err := createUpdateAnnouncement(0, 0, remoteKeyPriv1, timestamp)
  1884  	if err != nil {
  1885  		t.Fatalf("can't create update announcement: %v", err)
  1886  	}
  1887  	announcements.AddMsgs(networkMsg{
  1888  		msg:    ua4,
  1889  		peer:   nodePeer,
  1890  		source: nodePeer.IdentityKey(),
  1891  	})
  1892  	if len(announcements.channelUpdates) != 1 {
  1893  		t.Fatal("channel update not in batch")
  1894  	}
  1895  	assertChannelUpdate(ua3)
  1896  
  1897  	// Next we'll ensure that node announcements are properly de-duplicated.
  1898  	// We'll first add a single instance with a node's private key.
  1899  	na, err := createNodeAnnouncement(remoteKeyPriv1, timestamp)
  1900  	if err != nil {
  1901  		t.Fatalf("can't create node announcement: %v", err)
  1902  	}
  1903  	announcements.AddMsgs(networkMsg{
  1904  		msg:    na,
  1905  		peer:   nodePeer,
  1906  		source: nodePeer.IdentityKey(),
  1907  	})
  1908  	if len(announcements.nodeAnnouncements) != 1 {
  1909  		t.Fatal("new node announcement not stored in batch")
  1910  	}
  1911  
  1912  	// We'll now add another node to the batch.
  1913  	na2, err := createNodeAnnouncement(remoteKeyPriv2, timestamp)
  1914  	if err != nil {
  1915  		t.Fatalf("can't create node announcement: %v", err)
  1916  	}
  1917  	announcements.AddMsgs(networkMsg{
  1918  		msg:    na2,
  1919  		peer:   nodePeer,
  1920  		source: nodePeer.IdentityKey(),
  1921  	})
  1922  	if len(announcements.nodeAnnouncements) != 2 {
  1923  		t.Fatal("second node announcement not stored in batch")
  1924  	}
  1925  
  1926  	// Adding a new instance of the _same_ node shouldn't increase the size
  1927  	// of the node ann batch.
  1928  	na3, err := createNodeAnnouncement(remoteKeyPriv2, timestamp)
  1929  	if err != nil {
  1930  		t.Fatalf("can't create node announcement: %v", err)
  1931  	}
  1932  	announcements.AddMsgs(networkMsg{
  1933  		msg:    na3,
  1934  		peer:   nodePeer,
  1935  		source: nodePeer.IdentityKey(),
  1936  	})
  1937  	if len(announcements.nodeAnnouncements) != 2 {
  1938  		t.Fatal("second node announcement not replaced in batch")
  1939  	}
  1940  
  1941  	// Ensure that a node announcement made via a different pointer to the
  1942  	// same public key is still de-duplicated.
  1943  	newNodeKeyPointer := remoteKeyPriv2
  1944  	na4, err := createNodeAnnouncement(newNodeKeyPointer, timestamp)
  1945  	if err != nil {
  1946  		t.Fatalf("can't create node announcement: %v", err)
  1947  	}
  1948  	announcements.AddMsgs(networkMsg{
  1949  		msg:    na4,
  1950  		peer:   nodePeer,
  1951  		source: nodePeer.IdentityKey(),
  1952  	})
  1953  	if len(announcements.nodeAnnouncements) != 2 {
  1954  		t.Fatal("second node announcement not replaced again in batch")
  1955  	}
  1956  
  1957  	// Ensure that a node announcement with an increased timestamp replaces
  1958  	// what is currently stored.
  1959  	na5, err := createNodeAnnouncement(remoteKeyPriv2, timestamp+1)
  1960  	if err != nil {
  1961  		t.Fatalf("can't create node announcement: %v", err)
  1962  	}
  1963  	announcements.AddMsgs(networkMsg{
  1964  		msg:    na5,
  1965  		peer:   nodePeer,
  1966  		source: nodePeer.IdentityKey(),
  1967  	})
  1968  	if len(announcements.nodeAnnouncements) != 2 {
  1969  		t.Fatal("node announcement not replaced in batch")
  1970  	}
  1971  	nodeID := route.NewVertex(remoteKeyPriv2.PubKey())
  1972  	stored, ok := announcements.nodeAnnouncements[nodeID]
  1973  	if !ok {
  1974  		t.Fatalf("node announcement not found in batch")
  1975  	}
  1976  	if stored.msg != na5 {
  1977  		t.Fatalf("expected de-duped node announcement to be %v, got %v",
  1978  			na5, stored.msg)
  1979  	}
  1980  
  1981  	// Ensure that announcement batch delivers channel announcements,
  1982  	// channel updates, and node announcements in proper order.
  1983  	batch := announcements.Emit()
  1984  	if len(batch) != 4 {
  1985  		t.Fatal("announcement batch incorrect length")
  1986  	}
  1987  
  1988  	if !reflect.DeepEqual(batch[0].msg, ca2) {
  1989  		t.Fatalf("channel announcement not first in batch: got %v, "+
  1990  			"expected %v", spew.Sdump(batch[0].msg), spew.Sdump(ca2))
  1991  	}
  1992  
  1993  	if !reflect.DeepEqual(batch[1].msg, ua3) {
  1994  		t.Fatalf("channel update not next in batch: got %v, "+
  1995  			"expected %v", spew.Sdump(batch[1].msg), spew.Sdump(ua3))
  1996  	}
  1997  
  1998  	// We'll ensure that both node announcements are present. We check both
  1999  	// indexes as due to the randomized order of map iteration they may be
  2000  	// in either place.
  2001  	if !reflect.DeepEqual(batch[2].msg, na) && !reflect.DeepEqual(batch[3].msg, na) {
  2002  		t.Fatalf("first node announcement not in last part of batch: "+
  2003  			"got %v, expected %v", batch[2].msg,
  2004  			na)
  2005  	}
  2006  	if !reflect.DeepEqual(batch[2].msg, na5) && !reflect.DeepEqual(batch[3].msg, na5) {
  2007  		t.Fatalf("second node announcement not in last part of batch: "+
  2008  			"got %v, expected %v", batch[3].msg,
  2009  			na5)
  2010  	}
  2011  
  2012  	// Ensure that after reset, storage of each announcement type
  2013  	// in deDupedAnnouncements struct is empty again.
  2014  	announcements.Reset()
  2015  	if len(announcements.channelAnnouncements) != 0 {
  2016  		t.Fatal("channel announcements map not empty after reset")
  2017  	}
  2018  	if len(announcements.channelUpdates) != 0 {
  2019  		t.Fatal("channel updates map not empty after reset")
  2020  	}
  2021  	if len(announcements.nodeAnnouncements) != 0 {
  2022  		t.Fatal("node announcements map not empty after reset")
  2023  	}
  2024  }
  2025  
  2026  // TestForwardPrivateNodeAnnouncement ensures that we do not forward node
  2027  // announcements for nodes who do not intend to publicly advertise themselves.
  2028  func TestForwardPrivateNodeAnnouncement(t *testing.T) {
  2029  	t.Parallel()
  2030  
  2031  	const (
  2032  		startingHeight = 100
  2033  		timestamp      = 123456
  2034  	)
  2035  
  2036  	ctx, cleanup, err := createTestCtx(startingHeight)
  2037  	if err != nil {
  2038  		t.Fatalf("can't create context: %v", err)
  2039  	}
  2040  	defer cleanup()
  2041  
  2042  	// We'll start off by processing a channel announcement without a proof
  2043  	// (i.e., an unadvertised channel), followed by a node announcement for
  2044  	// this same channel announcement.
  2045  	chanAnn := createAnnouncementWithoutProof(
  2046  		startingHeight-2, selfKeyDesc.PubKey, remoteKeyPub1,
  2047  	)
  2048  	pubKey := remoteKeyPriv1.PubKey()
  2049  
  2050  	select {
  2051  	case err := <-ctx.gossiper.ProcessLocalAnnouncement(chanAnn):
  2052  		if err != nil {
  2053  			t.Fatalf("unable to process local announcement: %v", err)
  2054  		}
  2055  	case <-time.After(2 * time.Second):
  2056  		t.Fatalf("local announcement not processed")
  2057  	}
  2058  
  2059  	// The gossiper should not broadcast the announcement due to it not
  2060  	// having its announcement signatures.
  2061  	select {
  2062  	case <-ctx.broadcastedMessage:
  2063  		t.Fatal("gossiper should not have broadcast channel announcement")
  2064  	case <-time.After(2 * trickleDelay):
  2065  	}
  2066  
  2067  	nodeAnn, err := createNodeAnnouncement(remoteKeyPriv1, timestamp)
  2068  	if err != nil {
  2069  		t.Fatalf("unable to create node announcement: %v", err)
  2070  	}
  2071  
  2072  	select {
  2073  	case err := <-ctx.gossiper.ProcessLocalAnnouncement(nodeAnn):
  2074  		if err != nil {
  2075  			t.Fatalf("unable to process local announcement: %v", err)
  2076  		}
  2077  	case <-time.After(2 * time.Second):
  2078  		t.Fatal("local announcement not processed")
  2079  	}
  2080  
  2081  	// The gossiper should also not broadcast the node announcement due to
  2082  	// it not being part of any advertised channels.
  2083  	select {
  2084  	case <-ctx.broadcastedMessage:
  2085  		t.Fatal("gossiper should not have broadcast node announcement")
  2086  	case <-time.After(2 * trickleDelay):
  2087  	}
  2088  
  2089  	// Now, we'll attempt to forward the NodeAnnouncement for the same node
  2090  	// by opening a public channel on the network. We'll create a
  2091  	// ChannelAnnouncement and hand it off to the gossiper in order to
  2092  	// process it.
  2093  	remoteChanAnn, err := createRemoteChannelAnnouncement(startingHeight - 1)
  2094  	if err != nil {
  2095  		t.Fatalf("unable to create remote channel announcement: %v", err)
  2096  	}
  2097  	peer := &mockPeer{pubKey, nil, nil}
  2098  
  2099  	select {
  2100  	case err := <-ctx.gossiper.ProcessRemoteAnnouncement(remoteChanAnn, peer):
  2101  		if err != nil {
  2102  			t.Fatalf("unable to process remote announcement: %v", err)
  2103  		}
  2104  	case <-time.After(2 * time.Second):
  2105  		t.Fatal("remote announcement not processed")
  2106  	}
  2107  
  2108  	select {
  2109  	case <-ctx.broadcastedMessage:
  2110  	case <-time.After(2 * trickleDelay):
  2111  		t.Fatal("gossiper should have broadcast the channel announcement")
  2112  	}
  2113  
  2114  	// We'll recreate the NodeAnnouncement with an updated timestamp to
  2115  	// prevent a stale update. The NodeAnnouncement should now be forwarded.
  2116  	nodeAnn, err = createNodeAnnouncement(remoteKeyPriv1, timestamp+1)
  2117  	if err != nil {
  2118  		t.Fatalf("unable to create node announcement: %v", err)
  2119  	}
  2120  
  2121  	select {
  2122  	case err := <-ctx.gossiper.ProcessRemoteAnnouncement(nodeAnn, peer):
  2123  		if err != nil {
  2124  			t.Fatalf("unable to process remote announcement: %v", err)
  2125  		}
  2126  	case <-time.After(2 * time.Second):
  2127  		t.Fatal("remote announcement not processed")
  2128  	}
  2129  
  2130  	select {
  2131  	case <-ctx.broadcastedMessage:
  2132  	case <-time.After(2 * trickleDelay):
  2133  		t.Fatal("gossiper should have broadcast the node announcement")
  2134  	}
  2135  }
  2136  
  2137  // TestRejectZombieEdge ensures that we properly reject any announcements for
  2138  // zombie edges.
  2139  func TestRejectZombieEdge(t *testing.T) {
  2140  	t.Parallel()
  2141  
  2142  	// We'll start by creating our test context with a batch of
  2143  	// announcements.
  2144  	ctx, cleanup, err := createTestCtx(0)
  2145  	if err != nil {
  2146  		t.Fatalf("unable to create test context: %v", err)
  2147  	}
  2148  	defer cleanup()
  2149  
  2150  	batch, err := createRemoteAnnouncements(0)
  2151  	if err != nil {
  2152  		t.Fatalf("unable to create announcements: %v", err)
  2153  	}
  2154  	remotePeer := &mockPeer{pk: remoteKeyPriv2.PubKey()}
  2155  
  2156  	// processAnnouncements is a helper closure we'll use to test that we
  2157  	// properly process/reject announcements based on whether they're for a
  2158  	// zombie edge or not.
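        	// Note that a zombie rejection is expected to surface as a nil
        	// error: the gossiper quietly drops announcements for zombie
        	// edges rather than failing them back to the peer.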
  2159  	processAnnouncements := func(isZombie bool) {
  2160  		t.Helper()
  2161  
  2162  		errChan := ctx.gossiper.ProcessRemoteAnnouncement(
  2163  			batch.chanAnn, remotePeer,
  2164  		)
  2165  		select {
  2166  		case err := <-errChan:
  2167  			if isZombie && err != nil {
  2168  				t.Fatalf("expected to reject zombie channel "+
  2169  					"announcement with nil error: %v", err)
  2170  			}
  2171  			if !isZombie && err != nil {
  2172  				t.Fatalf("expected to process live channel "+
  2173  					"announcement: %v", err)
  2174  			}
  2175  		case <-time.After(time.Second):
  2176  			t.Fatal("expected to process channel announcement")
  2177  		}
  2178  		select {
  2179  		case <-ctx.broadcastedMessage:
  2180  			if isZombie {
  2181  				t.Fatal("expected to not broadcast zombie " +
  2182  					"channel announcement")
  2183  			}
  2184  		case <-time.After(2 * trickleDelay):
  2185  			if !isZombie {
  2186  				t.Fatal("expected to broadcast live channel " +
  2187  					"announcement")
  2188  			}
  2189  		}
  2190  
  2191  		errChan = ctx.gossiper.ProcessRemoteAnnouncement(
  2192  			batch.chanUpdAnn2, remotePeer,
  2193  		)
  2194  		select {
  2195  		case err := <-errChan:
  2196  			if isZombie && err != nil {
  2197  				t.Fatalf("expected to reject zombie channel "+
  2198  					"update with nil error: %v", err)
  2199  			}
  2200  			if !isZombie && err != nil {
  2201  				t.Fatalf("expected to process live channel "+
  2202  					"update: %v", err)
  2203  			}
  2204  		case <-time.After(time.Second):
  2205  			t.Fatal("expected to process channel update")
  2206  		}
  2207  		select {
  2208  		case <-ctx.broadcastedMessage:
  2209  			if isZombie {
  2210  				t.Fatal("expected to not broadcast zombie " +
  2211  					"channel update")
  2212  			}
  2213  		case <-time.After(2 * trickleDelay):
  2214  			if !isZombie {
  2215  				t.Fatal("expected to broadcast live channel " +
  2216  					"update")
  2217  			}
  2218  		}
  2219  	}
  2220  
  2221  	// We'll mark the edge for which we'll process announcements for as a
  2222  	// zombie within the router. This should reject any announcements for
  2223  	// this edge while it remains as a zombie.
  2224  	chanID := batch.chanAnn.ShortChannelID
  2225  	err = ctx.router.MarkEdgeZombie(
  2226  		chanID, batch.chanAnn.NodeID1, batch.chanAnn.NodeID2,
  2227  	)
  2228  	if err != nil {
  2229  		t.Fatalf("unable to mark channel %v as zombie: %v", chanID, err)
  2230  	}
  2231  
  2232  	processAnnouncements(true)
  2233  
  2234  	// If we then mark the edge as live, the edge's zombie status should be
  2235  	// overridden and the announcements should be processed.
  2236  	if err := ctx.router.MarkEdgeLive(chanID); err != nil {
  2237  		t.Fatalf("unable to mark channel %v as live: %v", chanID, err)
  2238  	}
  2239  
  2240  	processAnnouncements(false)
  2241  }
  2242  
  2243  // TestProcessZombieEdgeNowLive ensures that we can detect when a zombie edge
  2244  // becomes live by receiving a fresh update.
  2245  func TestProcessZombieEdgeNowLive(t *testing.T) {
  2246  	t.Parallel()
  2247  
  2248  	// We'll start by creating our test context with a batch of
  2249  	// announcements.
  2250  	ctx, cleanup, err := createTestCtx(0)
  2251  	if err != nil {
  2252  		t.Fatalf("unable to create test context: %v", err)
  2253  	}
  2254  	defer cleanup()
  2255  
  2256  	batch, err := createRemoteAnnouncements(0)
  2257  	if err != nil {
  2258  		t.Fatalf("unable to create announcements: %v", err)
  2259  	}
  2260  
  2261  	remotePeer := &mockPeer{pk: remoteKeyPriv1.PubKey()}
  2262  
  2263  	// processAnnouncement is a helper closure we'll use to ensure an
  2264  	// announcement is properly processed/rejected based on whether the edge
  2265  	// is a zombie or not. The expectsErr boolean can be used to determine
  2266  	// whether we should expect an error when processing the message, while
  2267  	// the isZombie boolean can be used to determine whether or not the
  2268  	// announcement should be broadcast.
  2269  	processAnnouncement := func(ann lnwire.Message, isZombie, expectsErr bool) {
  2270  		t.Helper()
  2271  
  2272  		errChan := ctx.gossiper.ProcessRemoteAnnouncement(
  2273  			ann, remotePeer,
  2274  		)
  2275  
  2276  		var err error
  2277  		select {
  2278  		case err = <-errChan:
  2279  		case <-time.After(time.Second):
  2280  			t.Fatal("expected to process announcement")
  2281  		}
  2282  		if expectsErr && err == nil {
  2283  			t.Fatal("expected error when processing announcement")
  2284  		}
  2285  		if !expectsErr && err != nil {
  2286  			t.Fatalf("received unexpected error when processing "+
  2287  				"announcement: %v", err)
  2288  		}
  2289  
  2290  		select {
  2291  		case msgWithSenders := <-ctx.broadcastedMessage:
  2292  			if isZombie {
  2293  				t.Fatal("expected to not broadcast zombie " +
  2294  					"channel message")
  2295  			}
  2296  			assertMessage(t, ann, msgWithSenders.msg)
  2297  
  2298  		case <-time.After(2 * trickleDelay):
  2299  			if !isZombie {
  2300  				t.Fatal("expected to broadcast live channel " +
  2301  					"message")
  2302  			}
  2303  		}
  2304  	}
  2305  
  2306  	// We'll generate a channel update with a timestamp far enough in the
  2307  	// past to consider it a zombie.
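        	// routing.DefaultChannelPruneExpiry is the horizon beyond which
        	// an edge with no fresh update is considered prunable, so
        	// backdating by exactly that amount makes this update stale.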
  2308  	zombieTimestamp := time.Now().Add(-routing.DefaultChannelPruneExpiry)
  2309  	batch.chanUpdAnn2.Timestamp = uint32(zombieTimestamp.Unix())
  2310  	if err := signUpdate(remoteKeyPriv2, batch.chanUpdAnn2); err != nil {
  2311  		t.Fatalf("unable to sign update with new timestamp: %v", err)
  2312  	}
  2313  
  2314  	// We'll also add the edge to our zombie index, providing a blank
  2315  	// pubkey for the first node, as we're simulating the situation where
  2316  	// the first node is updating but the second node isn't. In this case
  2317  	// we only want to allow a new update from the second node to resurrect
  2318  	// the entire edge.
  2319  	chanID := batch.chanAnn.ShortChannelID
  2320  	err = ctx.router.MarkEdgeZombie(
  2321  		chanID, [33]byte{}, batch.chanAnn.NodeID2,
  2322  	)
  2323  	if err != nil {
  2324  		t.Fatalf("unable to mark channel %v as zombie: %v", chanID, err)
  2325  	}
  2326  
  2327  	// If we send a new update but for the other direction of the channel,
  2328  	// then it should still be rejected as we want a fresh update from the
  2329  	// one that was considered stale.
  2330  	batch.chanUpdAnn1.Timestamp = uint32(time.Now().Unix())
  2331  	if err := signUpdate(remoteKeyPriv1, batch.chanUpdAnn1); err != nil {
  2332  		t.Fatalf("unable to sign update with new timestamp: %v", err)
  2333  	}
  2334  	processAnnouncement(batch.chanUpdAnn1, true, true)
  2335  
  2336  	// At this point, the channel should still be considered a zombie.
  2337  	_, _, _, err = ctx.router.GetChannelByID(chanID)
  2338  	if err != channeldb.ErrZombieEdge {
  2339  		t.Fatalf("channel should still be a zombie")
  2340  	}
  2341  
  2342  	// Attempting to process the current channel update should be rejected,
  2343  	// as its edge is considered a zombie and its timestamp is not within
  2344  	// the live horizon. We should not expect an error, however, since it
  2345  	// is just a stale update.
  2346  	processAnnouncement(batch.chanUpdAnn2, true, false)
  2347  
  2348  	// Now we'll generate a new update with a fresh timestamp. This should
  2349  	// allow the channel update to be processed even though it is still
  2350  	// marked as a zombie within the index, since it is a fresh new update.
  2351  	// This won't work however since we'll sign it with the wrong private
  2352  	// key (remote key 1 rather than remote key 2).
  2353  	batch.chanUpdAnn2.Timestamp = uint32(time.Now().Unix())
  2354  	if err := signUpdate(remoteKeyPriv1, batch.chanUpdAnn2); err != nil {
  2355  		t.Fatalf("unable to sign update with new timestamp: %v", err)
  2356  	}
  2357  
  2358  	// We should expect an error due to the signature being invalid.
  2359  	processAnnouncement(batch.chanUpdAnn2, true, true)
  2360  
  2361  	// Signing it with the correct private key should allow it to be
  2362  	// processed.
  2363  	if err := signUpdate(remoteKeyPriv2, batch.chanUpdAnn2); err != nil {
  2364  		t.Fatalf("unable to sign update with new timestamp: %v", err)
  2365  	}
  2366  
  2367  	// The channel update cannot be successfully processed and broadcast
  2368  	// until the channel announcement is. Since the channel update indicates
  2369  	// a fresh new update, the gossiper should stash it until it sees the
  2370  	// corresponding channel announcement.
  2371  	updateErrChan := ctx.gossiper.ProcessRemoteAnnouncement(
  2372  		batch.chanUpdAnn2, remotePeer,
  2373  	)
  2374  
  2375  	select {
  2376  	case <-ctx.broadcastedMessage:
  2377  		t.Fatal("expected to not broadcast live channel update " +
  2378  			"without announcement")
  2379  	case <-time.After(2 * trickleDelay):
  2380  	}
  2381  
  2382  	// We'll go ahead and process the channel announcement to ensure the
  2383  	// channel update is processed thereafter.
  2384  	processAnnouncement(batch.chanAnn, false, false)
  2385  
  2386  	// After successfully processing the announcement, the channel update
  2387  	// should have been processed and broadcast successfully as well.
  2388  	select {
  2389  	case err := <-updateErrChan:
  2390  		if err != nil {
  2391  			t.Fatalf("expected to process live channel update: %v",
  2392  				err)
  2393  		}
  2394  	case <-time.After(time.Second):
  2395  		t.Fatal("expected to process announcement")
  2396  	}
  2397  
  2398  	select {
  2399  	case msgWithSenders := <-ctx.broadcastedMessage:
  2400  		assertMessage(t, batch.chanUpdAnn2, msgWithSenders.msg)
  2401  	case <-time.After(2 * trickleDelay):
  2402  		t.Fatal("expected to broadcast live channel update")
  2403  	}
  2404  }
  2405  
  2406  // TestReceiveRemoteChannelUpdateFirst tests that if we receive a ChannelUpdate
  2407  // from the remote before we have processed our own ChannelAnnouncement, it will
  2408  // be reprocessed later, after our ChannelAnnouncement.
  2409  func TestReceiveRemoteChannelUpdateFirst(t *testing.T) {
  2410  	t.Parallel()
  2411  
  2412  	ctx, cleanup, err := createTestCtx(proofMatureDelta)
  2413  	if err != nil {
  2414  		t.Fatalf("can't create context: %v", err)
  2415  	}
  2416  	defer cleanup()
  2417  
  2418  	batch, err := createLocalAnnouncements(0)
  2419  	if err != nil {
  2420  		t.Fatalf("can't generate announcements: %v", err)
  2421  	}
  2422  
  2423  	remoteKey, err := secp256k1.ParsePubKey(batch.nodeAnn2.NodeID[:])
  2424  	if err != nil {
  2425  		t.Fatalf("unable to parse pubkey: %v", err)
  2426  	}
  2427  
  2428  	// Set up a channel that we can use to inspect the messages sent
  2429  	// directly from the gossiper.
  2430  	sentMsgs := make(chan lnwire.Message, 10)
  2431  	remotePeer := &mockPeer{remoteKey, sentMsgs, ctx.gossiper.quit}
  2432  
  2433  	// Override NotifyWhenOnline to return the remote peer which we expect
  2434  	// messages to be sent to.
  2435  	ctx.gossiper.reliableSender.cfg.NotifyWhenOnline = func(peer [33]byte,
  2436  		peerChan chan<- lnpeer.Peer) {
  2437  
  2438  		peerChan <- remotePeer
  2439  	}
  2440  
  2441  	// Recreate the case where the remote node is sending us its ChannelUpdate
  2442  	// before we have been able to process our own ChannelAnnouncement and
  2443  	// ChannelUpdate.
  2444  	errRemoteAnn := ctx.gossiper.ProcessRemoteAnnouncement(
  2445  		batch.chanUpdAnn2, remotePeer,
  2446  	)
  2447  	select {
  2448  	case <-ctx.broadcastedMessage:
  2449  		t.Fatal("channel update announcement was broadcast")
  2450  	case <-time.After(2 * trickleDelay):
  2451  	}
  2452  
  2453  	err = <-ctx.gossiper.ProcessRemoteAnnouncement(batch.nodeAnn2, remotePeer)
  2454  	if err != nil {
  2455  		t.Fatalf("unable to process node ann: %v", err)
  2456  	}
  2457  	select {
  2458  	case <-ctx.broadcastedMessage:
  2459  		t.Fatal("node announcement was broadcast")
  2460  	case <-time.After(2 * trickleDelay):
  2461  	}
  2462  
  2463  	// Since the remote ChannelUpdate was added for an edge that
  2464  	// we did not already know about, it should have been added
  2465  	// to the map of premature ChannelUpdates. Check that nothing
  2466  	// was added to the graph.
  2467  	chanInfo, e1, e2, err := ctx.router.GetChannelByID(batch.chanUpdAnn1.ShortChannelID)
  2468  	if err != channeldb.ErrEdgeNotFound {
  2469  		t.Fatalf("Expected ErrEdgeNotFound, got: %v", err)
  2470  	}
  2471  	if chanInfo != nil {
  2472  		t.Fatalf("chanInfo was not nil")
  2473  	}
  2474  	if e1 != nil {
  2475  		t.Fatalf("e1 was not nil")
  2476  	}
  2477  	if e2 != nil {
  2478  		t.Fatalf("e2 was not nil")
  2479  	}
  2480  
  2481  	// Recreate lightning network topology. Initialize router with channel
  2482  	// between two nodes.
  2483  	err = <-ctx.gossiper.ProcessLocalAnnouncement(batch.chanAnn)
  2484  	if err != nil {
  2485  		t.Fatalf("unable to process: %v", err)
  2486  	}
  2487  	select {
  2488  	case <-ctx.broadcastedMessage:
  2489  		t.Fatal("channel announcement was broadcast")
  2490  	case <-time.After(2 * trickleDelay):
  2491  	}
  2492  
  2493  	err = <-ctx.gossiper.ProcessLocalAnnouncement(batch.chanUpdAnn1)
  2494  	if err != nil {
  2495  		t.Fatalf("unable to process: %v", err)
  2496  	}
  2497  	select {
  2498  	case <-ctx.broadcastedMessage:
  2499  		t.Fatal("channel update announcement was broadcast")
  2500  	case <-time.After(2 * trickleDelay):
  2501  	}
  2502  
  2503  	err = <-ctx.gossiper.ProcessLocalAnnouncement(batch.nodeAnn1)
  2504  	if err != nil {
  2505  		t.Fatalf("unable to process: %v", err)
  2506  	}
  2507  	select {
  2508  	case <-ctx.broadcastedMessage:
  2509  		t.Fatal("node announcement was broadcast")
  2510  	case <-time.After(2 * trickleDelay):
  2511  	}
  2512  
  2513  	// The local ChannelUpdate should now be sent directly to the remote peer,
  2514  	// such that the edge can be used for routing, regardless of whether
  2515  	// this channel is announced or not (private channel).
  2516  	select {
  2517  	case msg := <-sentMsgs:
  2518  		assertMessage(t, batch.chanUpdAnn1, msg)
  2519  	case <-time.After(1 * time.Second):
  2520  		t.Fatal("gossiper did not send channel update to peer")
  2521  	}
  2522  
  2523  	// At this point the remote ChannelUpdate we received earlier should
  2524  	// be reprocessed, as we now have the necessary edge entry in the graph.
  2525  	select {
  2526  	case err := <-errRemoteAnn:
  2527  		if err != nil {
  2528  			t.Fatalf("error re-processing remote update: %v", err)
  2529  		}
  2530  	case <-time.After(2 * trickleDelay):
  2531  		t.Fatalf("remote update was not processed")
  2532  	}
  2533  
  2534  	// Check that the ChannelEdgePolicy was added to the graph.
  2535  	chanInfo, e1, e2, err = ctx.router.GetChannelByID(
  2536  		batch.chanUpdAnn1.ShortChannelID,
  2537  	)
  2538  	if err != nil {
  2539  		t.Fatalf("unable to get channel from router: %v", err)
  2540  	}
  2541  	if chanInfo == nil {
  2542  		t.Fatalf("chanInfo was nil")
  2543  	}
  2544  	if e1 == nil {
  2545  		t.Fatalf("e1 was nil")
  2546  	}
  2547  	if e2 == nil {
  2548  		t.Fatalf("e2 was nil")
  2549  	}
  2550  
  2551  	// Pretend that we receive the local channel announcement from the
  2552  	// funding manager, thereby kicking off the announcement exchange process.
  2553  	err = <-ctx.gossiper.ProcessLocalAnnouncement(batch.localProofAnn)
  2554  	if err != nil {
  2555  		t.Fatalf("unable to process: %v", err)
  2556  	}
  2557  
  2558  	select {
  2559  	case <-ctx.broadcastedMessage:
  2560  		t.Fatal("announcements were broadcast")
  2561  	case <-time.After(2 * trickleDelay):
  2562  	}
  2563  
  2564  	number := 0
  2565  	if err := ctx.gossiper.cfg.WaitingProofStore.ForAll(
  2566  		func(*channeldb.WaitingProof) error {
  2567  			number++
  2568  			return nil
  2569  		},
  2570  		func() {
  2571  			number = 0
  2572  		},
  2573  	); err != nil {
  2574  		t.Fatalf("unable to retrieve objects from store: %v", err)
  2575  	}
  2576  
  2577  	if number != 1 {
  2578  		t.Fatal("wrong number of objects in storage")
  2579  	}
  2580  
  2581  	err = <-ctx.gossiper.ProcessRemoteAnnouncement(
  2582  		batch.remoteProofAnn, remotePeer,
  2583  	)
  2584  	if err != nil {
  2585  		t.Fatalf("unable to process: %v", err)
  2586  	}
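
        	// With both halves of the proof exchanged, the announcements
        	// should now be broadcast. The four messages are presumably the
        	// channel announcement, both channel updates, and a node
        	// announcement.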
  2587  
  2588  	for i := 0; i < 4; i++ {
  2589  		select {
  2590  		case <-ctx.broadcastedMessage:
  2591  		case <-time.After(time.Second):
  2592  			t.Fatal("announcement wasn't broadcast")
  2593  		}
  2594  	}
  2595  
  2596  	number = 0
  2597  	if err := ctx.gossiper.cfg.WaitingProofStore.ForAll(
  2598  		func(*channeldb.WaitingProof) error {
  2599  			number++
  2600  			return nil
  2601  		},
  2602  		func() {
  2603  			number = 0
  2604  		},
  2605  	); err != nil && err != channeldb.ErrWaitingProofNotFound {
  2606  		t.Fatalf("unable to retrieve objects from store: %v", err)
  2607  	}
  2608  
  2609  	if number != 0 {
  2610  		t.Fatal("waiting proof should be removed from storage")
  2611  	}
  2612  }
  2613  
  2614  // TestExtraDataChannelAnnouncementValidation tests that we're able to properly
  2615  // validate a ChannelAnnouncement that includes opaque bytes that we don't
  2616  // currently know of.
  2617  func TestExtraDataChannelAnnouncementValidation(t *testing.T) {
  2618  	t.Parallel()
  2619  
  2620  	ctx, cleanup, err := createTestCtx(0)
  2621  	if err != nil {
  2622  		t.Fatalf("can't create context: %v", err)
  2623  	}
  2624  	defer cleanup()
  2625  
  2626  	remotePeer := &mockPeer{remoteKeyPriv1.PubKey(), nil, nil}
  2627  
  2628  	// We'll now create an announcement that contains an extra set of bytes
  2629  	// that we don't know of ourselves, but should still include in the
  2630  	// final signature check.
  2631  	extraBytes := []byte("gotta validate this still!")
  2632  	ca, err := createRemoteChannelAnnouncement(0, extraBytes)
  2633  	if err != nil {
  2634  		t.Fatalf("can't create channel announcement: %v", err)
  2635  	}
  2636  
  2637  	// We'll now send the announcement to the main gossiper. We should be
  2638  	// able to validate this announcement without a problem.
  2639  	select {
  2640  	case err = <-ctx.gossiper.ProcessRemoteAnnouncement(ca, remotePeer):
  2641  	case <-time.After(2 * time.Second):
  2642  		t.Fatal("did not process remote announcement")
  2643  	}
  2644  	if err != nil {
  2645  		t.Fatalf("unable to process: %v", err)
  2646  	}
  2647  }
  2648  
  2649  // TestExtraDataChannelUpdateValidation tests that we're able to properly
  2650  // validate a ChannelUpdate that includes opaque bytes that we don't currently
  2651  // know of.
  2652  func TestExtraDataChannelUpdateValidation(t *testing.T) {
  2653  	t.Parallel()
  2654  
  2655  	timestamp := testTimestamp
  2656  	ctx, cleanup, err := createTestCtx(0)
  2657  	if err != nil {
  2658  		t.Fatalf("can't create context: %v", err)
  2659  	}
  2660  	defer cleanup()
  2661  
  2662  	remotePeer := &mockPeer{remoteKeyPriv1.PubKey(), nil, nil}
  2663  
  2664  	// In this scenario, we'll create two announcements, one regular
  2665  	// channel announcement, and another channel update announcement, that
  2666  	// has additional data that we won't be interpreting.
  2667  	chanAnn, err := createRemoteChannelAnnouncement(0)
  2668  	if err != nil {
  2669  		t.Fatalf("unable to create chan ann: %v", err)
  2670  	}
  2671  	chanUpdAnn1, err := createUpdateAnnouncement(
  2672  		0, 0, remoteKeyPriv1, timestamp,
  2673  		[]byte("must also validate"),
  2674  	)
  2675  	if err != nil {
  2676  		t.Fatalf("unable to create chan up: %v", err)
  2677  	}
  2678  	chanUpdAnn2, err := createUpdateAnnouncement(
  2679  		0, 1, remoteKeyPriv2, timestamp,
  2680  		[]byte("must also validate"),
  2681  	)
  2682  	if err != nil {
  2683  		t.Fatalf("unable to create chan up: %v", err)
  2684  	}
  2685  
  2686  	// We should be able to properly validate all three messages without
  2687  	// any issue.
  2688  	select {
  2689  	case err = <-ctx.gossiper.ProcessRemoteAnnouncement(chanAnn, remotePeer):
  2690  	case <-time.After(2 * time.Second):
  2691  		t.Fatal("did not process remote announcement")
  2692  	}
  2693  	if err != nil {
  2694  		t.Fatalf("unable to process announcement: %v", err)
  2695  	}
  2696  
  2697  	select {
  2698  	case err = <-ctx.gossiper.ProcessRemoteAnnouncement(chanUpdAnn1, remotePeer):
  2699  	case <-time.After(2 * time.Second):
  2700  		t.Fatal("did not process remote announcement")
  2701  	}
  2702  	if err != nil {
  2703  		t.Fatalf("unable to process announcement: %v", err)
  2704  	}
  2705  
  2706  	select {
  2707  	case err = <-ctx.gossiper.ProcessRemoteAnnouncement(chanUpdAnn2, remotePeer):
  2708  	case <-time.After(2 * time.Second):
  2709  		t.Fatal("did not process remote announcement")
  2710  	}
  2711  	if err != nil {
  2712  		t.Fatalf("unable to process announcement: %v", err)
  2713  	}
  2714  }
  2715  
  2716  // TestExtraDataNodeAnnouncementValidation tests that we're able to properly
  2717  // validate a NodeAnnouncement that includes opaque bytes that we don't
  2718  // currently know of.
  2719  func TestExtraDataNodeAnnouncementValidation(t *testing.T) {
  2720  	t.Parallel()
  2721  
  2722  	ctx, cleanup, err := createTestCtx(0)
  2723  	if err != nil {
  2724  		t.Fatalf("can't create context: %v", err)
  2725  	}
  2726  	defer cleanup()
  2727  
  2728  	remotePeer := &mockPeer{remoteKeyPriv1.PubKey(), nil, nil}
  2729  	timestamp := testTimestamp
  2730  
  2731  	// We'll create a node announcement that includes a set of opaque data
  2732  	// which we don't know of, but will store anyway in order to ensure
  2733  	// upgrades can flow smoothly in the future.
  2734  	nodeAnn, err := createNodeAnnouncement(
  2735  		remoteKeyPriv1, timestamp, []byte("gotta validate"),
  2736  	)
  2737  	if err != nil {
  2738  		t.Fatalf("can't create node announcement: %v", err)
  2739  	}
  2740  
  2741  	select {
  2742  	case err = <-ctx.gossiper.ProcessRemoteAnnouncement(nodeAnn, remotePeer):
  2743  	case <-time.After(2 * time.Second):
  2744  		t.Fatal("did not process remote announcement")
  2745  	}
  2746  	if err != nil {
  2747  		t.Fatalf("unable to process announcement: %v", err)
  2748  	}
  2749  }
  2750  
  2751  // assertBroadcast checks that num messages are being broadcasted from the
  2752  // gossiper. The broadcasted messages are returned.
  2753  func assertBroadcast(t *testing.T, ctx *testCtx, num int) []lnwire.Message {
  2754  	t.Helper()
  2755  
  2756  	var msgs []lnwire.Message
  2757  	for i := 0; i < num; i++ {
  2758  		select {
  2759  		case msg := <-ctx.broadcastedMessage:
  2760  			msgs = append(msgs, msg.msg)
  2761  		case <-time.After(time.Second):
  2762  			t.Fatalf("expected %d messages to be broadcast, only "+
  2763  				"got %d", num, i)
  2764  		}
  2765  	}
  2766  
  2767  	// No more messages should be broadcast.
  2768  	select {
  2769  	case msg := <-ctx.broadcastedMessage:
  2770  		t.Fatalf("unexpected message was broadcast: %T", msg.msg)
  2771  	case <-time.After(2 * trickleDelay):
  2772  	}
  2773  
  2774  	return msgs
  2775  }
  2776  
  2777  // assertProcessAnnouncement is a helper method that checks that the result of
  2778  // processing an announcement is successful.
  2779  func assertProcessAnnouncement(t *testing.T, result chan error) {
  2780  	t.Helper()
  2781  
  2782  	select {
  2783  	case err := <-result:
  2784  		if err != nil {
  2785  			t.Fatalf("unable to process: %v", err)
  2786  		}
  2787  	case <-time.After(2 * time.Second):
  2788  		t.Fatal("did not process announcement")
  2789  	}
  2790  }
  2791  
  2792  // TestRetransmit checks that the expected announcements are retransmitted when
  2793  // the retransmit ticker ticks.
  2794  func TestRetransmit(t *testing.T) {
  2795  	t.Parallel()
  2796  
  2797  	ctx, cleanup, err := createTestCtx(proofMatureDelta)
  2798  	if err != nil {
  2799  		t.Fatalf("can't create context: %v", err)
  2800  	}
  2801  	defer cleanup()
  2802  
  2803  	batch, err := createLocalAnnouncements(0)
  2804  	if err != nil {
  2805  		t.Fatalf("can't generate announcements: %v", err)
  2806  	}
  2807  
  2808  	remoteKey, err := secp256k1.ParsePubKey(batch.nodeAnn2.NodeID[:])
  2809  	if err != nil {
  2810  		t.Fatalf("unable to parse pubkey: %v", err)
  2811  	}
  2812  	remotePeer := &mockPeer{remoteKey, nil, nil}
  2813  
  2814  	// Process a local channel announcement, channel update and node
  2815  	// announcement. No messages should be broadcasted yet, since no proof
  2816  	// has been exchanged.
  2817  	assertProcessAnnouncement(
  2818  		t, ctx.gossiper.ProcessLocalAnnouncement(batch.chanAnn),
  2819  	)
  2820  	assertBroadcast(t, ctx, 0)
  2821  
  2822  	assertProcessAnnouncement(
  2823  		t, ctx.gossiper.ProcessLocalAnnouncement(batch.chanUpdAnn1),
  2824  	)
  2825  	assertBroadcast(t, ctx, 0)
  2826  
  2827  	assertProcessAnnouncement(
  2828  		t, ctx.gossiper.ProcessLocalAnnouncement(batch.nodeAnn1),
  2829  	)
  2830  	assertBroadcast(t, ctx, 0)
  2831  
  2832  	// Add the remote channel update to the gossiper. Similarly, nothing
  2833  	// should be broadcasted.
  2834  	assertProcessAnnouncement(
  2835  		t, ctx.gossiper.ProcessRemoteAnnouncement(
  2836  			batch.chanUpdAnn2, remotePeer,
  2837  		),
  2838  	)
  2839  	assertBroadcast(t, ctx, 0)
  2840  
  2841  	// Now add the local and remote proofs to the gossiper. Only once both
  2842  	// halves are present should the announcements be broadcast.
  2843  	assertProcessAnnouncement(
  2844  		t, ctx.gossiper.ProcessLocalAnnouncement(batch.localProofAnn),
  2845  	)
  2846  	assertBroadcast(t, ctx, 0)
  2847  
  2848  	assertProcessAnnouncement(
  2849  		t, ctx.gossiper.ProcessRemoteAnnouncement(
  2850  			batch.remoteProofAnn, remotePeer,
  2851  		),
  2852  	)
  2853  
  2854  	// checkAnnouncements makes sure the expected number of channel
  2855  	// announcements + channel updates + node announcements are broadcast.
  2856  	checkAnnouncements := func(t *testing.T, chanAnns, chanUpds,
  2857  		nodeAnns int) {
  2858  
  2859  		t.Helper()
  2860  
  2861  		num := chanAnns + chanUpds + nodeAnns
  2862  		anns := assertBroadcast(t, ctx, num)
  2863  
  2864  		// Count the received announcements.
  2865  		var chanAnn, chanUpd, nodeAnn int
  2866  		for _, msg := range anns {
  2867  			switch msg.(type) {
  2868  			case *lnwire.ChannelAnnouncement:
  2869  				chanAnn++
  2870  			case *lnwire.ChannelUpdate:
  2871  				chanUpd++
  2872  			case *lnwire.NodeAnnouncement:
  2873  				nodeAnn++
  2874  			}
  2875  		}
  2876  
  2877  		if chanAnn != chanAnns || chanUpd != chanUpds ||
  2878  			nodeAnn != nodeAnns {
  2879  			t.Fatalf("unexpected number of announcements: "+
  2880  				"chanAnn=%d, chanUpd=%d, nodeAnn=%d",
  2881  				chanAnn, chanUpd, nodeAnn)
  2882  		}
  2883  	}
  2884  
  2885  	// All announcements should be broadcast, including the remote channel
  2886  	// update.
  2887  	checkAnnouncements(t, 1, 2, 1)
  2888  
  2889  	// Now let the retransmit ticker tick, which should trigger updates to
  2890  	// be rebroadcast.
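        	// The forced time lands beyond rebroadcastInterval, so the
        	// gossiper should consider its own announcements due for
        	// retransmission.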
  2891  	now := time.Unix(int64(testTimestamp), 0)
  2892  	future := now.Add(rebroadcastInterval + 10*time.Second)
  2893  	select {
  2894  	case ctx.gossiper.cfg.RetransmitTicker.(*ticker.Force).Force <- future:
  2895  	case <-time.After(2 * time.Second):
  2896  		t.Fatalf("unable to force tick")
  2897  	}
  2898  
  2899  	// The channel announcement + local channel update + node announcement
  2900  	// should be re-broadcast.
  2901  	checkAnnouncements(t, 1, 1, 1)
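        	// Note that the remote channel update is not re-broadcast;
        	// retransmission is assumed to cover only our own announcements.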
  2902  }
  2903  
  2904  // TestNodeAnnouncementNoChannels tests that NodeAnnouncements for nodes with
  2905  // no existing channels in the graph do not get forwarded.
  2906  func TestNodeAnnouncementNoChannels(t *testing.T) {
  2907  	t.Parallel()
  2908  
  2909  	ctx, cleanup, err := createTestCtx(0)
  2910  	if err != nil {
  2911  		t.Fatalf("can't create context: %v", err)
  2912  	}
  2913  	defer cleanup()
  2914  
  2915  	batch, err := createRemoteAnnouncements(0)
  2916  	if err != nil {
  2917  		t.Fatalf("can't generate announcements: %v", err)
  2918  	}
  2919  
  2920  	remoteKey, err := secp256k1.ParsePubKey(batch.nodeAnn2.NodeID[:])
  2921  	if err != nil {
  2922  		t.Fatalf("unable to parse pubkey: %v", err)
  2923  	}
  2924  	remotePeer := &mockPeer{remoteKey, nil, nil}
  2925  
  2926  	// Process the remote node announcement.
  2927  	select {
  2928  	case err = <-ctx.gossiper.ProcessRemoteAnnouncement(batch.nodeAnn2,
  2929  		remotePeer):
  2930  	case <-time.After(2 * time.Second):
  2931  		t.Fatal("did not process remote announcement")
  2932  	}
  2933  	if err != nil {
  2934  		t.Fatalf("unable to process announcement: %v", err)
  2935  	}
  2936  
  2937  	// Since no channels or node announcements were already in the graph,
  2938  	// the node announcement should be ignored, and not forwarded.
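        	// (This mirrors BOLT #7, which says node announcements should be
        	// ignored if the node isn't already known from a channel
        	// announcement.)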
  2939  	select {
  2940  	case <-ctx.broadcastedMessage:
  2941  		t.Fatal("node announcement was broadcast")
  2942  	case <-time.After(2 * trickleDelay):
  2943  	}
  2944  
  2945  	// Now add the node's channel to the graph by processing the channel
  2946  	// announcement and channel update.
  2947  	select {
  2948  	case err = <-ctx.gossiper.ProcessRemoteAnnouncement(batch.chanAnn,
  2949  		remotePeer):
  2950  	case <-time.After(2 * time.Second):
  2951  		t.Fatal("did not process remote announcement")
  2952  	}
  2953  	if err != nil {
  2954  		t.Fatalf("unable to process announcement: %v", err)
  2955  	}
  2956  
  2957  	select {
  2958  	case err = <-ctx.gossiper.ProcessRemoteAnnouncement(batch.chanUpdAnn2,
  2959  		remotePeer):
  2960  	case <-time.After(2 * time.Second):
  2961  		t.Fatal("did not process remote announcement")
  2962  	}
  2963  	if err != nil {
  2964  		t.Fatalf("unable to process announcement: %v", err)
  2965  	}
  2966  
  2967  	// Now process the node announcement again.
  2968  	select {
  2969  	case err = <-ctx.gossiper.ProcessRemoteAnnouncement(batch.nodeAnn2, remotePeer):
  2970  	case <-time.After(2 * time.Second):
  2971  		t.Fatal("did not process remote announcement")
  2972  	}
  2973  	if err != nil {
  2974  		t.Fatalf("unable to process announcement: %v", err)
  2975  	}
  2976  
  2977  	// This time the node announcement should be forwarded, as should the
  2978  	// channel announcement and update.
  2979  	for i := 0; i < 3; i++ {
  2980  		select {
  2981  		case <-ctx.broadcastedMessage:
  2982  		case <-time.After(time.Second):
  2983  			t.Fatal("announcement wasn't broadcast")
  2984  		}
  2985  	}
  2986  
  2987  	// Processing the same node announcement again should be ignored, as
  2988  	// it is stale.
  2989  	select {
  2990  	case err = <-ctx.gossiper.ProcessRemoteAnnouncement(batch.nodeAnn2,
  2991  		remotePeer):
  2992  	case <-time.After(2 * time.Second):
  2993  		t.Fatal("did not process remote announcement")
  2994  	}
  2995  	if err != nil {
  2996  		t.Fatalf("unable to process announcement: %v", err)
  2997  	}
  2998  
  2999  	select {
  3000  	case <-ctx.broadcastedMessage:
  3001  		t.Fatal("node announcement was broadcast")
  3002  	case <-time.After(2 * trickleDelay):
  3003  	}
  3004  }
  3005  
  3006  // TestOptionalFieldsChannelUpdateValidation tests that we're able to properly
  3007  // validate the msg flags and optional max HTLC field of a ChannelUpdate.
  3008  func TestOptionalFieldsChannelUpdateValidation(t *testing.T) {
  3009  	t.Parallel()
  3010  
  3011  	ctx, cleanup, err := createTestCtx(0)
  3012  	if err != nil {
  3013  		t.Fatalf("can't create context: %v", err)
  3014  	}
  3015  	defer cleanup()
  3016  
  3017  	chanUpdateHeight := uint32(0)
  3018  	timestamp := uint32(123456)
  3019  	nodePeer := &mockPeer{remoteKeyPriv1.PubKey(), nil, nil}
  3020  
  3021  	// In this scenario, we'll test whether the message flags field in a channel
  3022  	// update is properly handled.
  3023  	chanAnn, err := createRemoteChannelAnnouncement(chanUpdateHeight)
  3024  	if err != nil {
  3025  		t.Fatalf("can't create channel announcement: %v", err)
  3026  	}
  3027  
  3028  	select {
  3029  	case err = <-ctx.gossiper.ProcessRemoteAnnouncement(chanAnn, nodePeer):
  3030  	case <-time.After(2 * time.Second):
  3031  		t.Fatal("did not process remote announcement")
  3032  	}
  3033  	if err != nil {
  3034  		t.Fatalf("unable to process announcement: %v", err)
  3035  	}
  3036  
  3037  	// The first update should fail due to an invalid max HTLC field,
  3038  	// which is less than the min HTLC.
  3039  	chanUpdAnn, err := createUpdateAnnouncement(0, 0, remoteKeyPriv1, timestamp)
  3040  	if err != nil {
  3041  		t.Fatalf("unable to create channel update: %v", err)
  3042  	}
  3043  
  3044  	chanUpdAnn.HtlcMinimumMAtoms = 5000
  3045  	chanUpdAnn.HtlcMaximumMAtoms = 4000
  3046  	if err := signUpdate(remoteKeyPriv1, chanUpdAnn); err != nil {
  3047  		t.Fatalf("unable to sign channel update: %v", err)
  3048  	}
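
        	// Note: createUpdateAnnouncement is assumed to set the optional
        	// max-HTLC bit in the update's message flags, which is what
        	// causes the validator to compare HtlcMaximumMAtoms against
        	// HtlcMinimumMAtoms here.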
  3049  
  3050  	select {
  3051  	case err = <-ctx.gossiper.ProcessRemoteAnnouncement(chanUpdAnn, nodePeer):
  3052  	case <-time.After(2 * time.Second):
  3053  		t.Fatal("did not process remote announcement")
  3054  	}
  3055  	if err == nil || !strings.Contains(err.Error(), "invalid max htlc") {
  3056  		t.Fatalf("expected chan update to error, instead got %v", err)
  3057  	}
  3058  
  3059  	// The second update should fail because the message flag is set but
  3060  	// the max HTLC field is 0.
  3061  	chanUpdAnn.HtlcMinimumMAtoms = 0
  3062  	chanUpdAnn.HtlcMaximumMAtoms = 0
  3063  	if err := signUpdate(remoteKeyPriv1, chanUpdAnn); err != nil {
  3064  		t.Fatalf("unable to sign channel update: %v", err)
  3065  	}
  3066  
  3067  	select {
  3068  	case err = <-ctx.gossiper.ProcessRemoteAnnouncement(chanUpdAnn, nodePeer):
  3069  	case <-time.After(2 * time.Second):
  3070  		t.Fatal("did not process remote announcement")
  3071  	}
  3072  	if err == nil || !strings.Contains(err.Error(), "invalid max htlc") {
  3073  		t.Fatalf("expected chan update to error, instead got %v", err)
  3074  	}
  3075  
  3076  	// The final update should succeed, since setting the message flags to
  3077  	// 0 means the nonsense max_htlc field will just be ignored.
  3078  	chanUpdAnn.MessageFlags = 0
  3079  	if err := signUpdate(remoteKeyPriv1, chanUpdAnn); err != nil {
  3080  		t.Fatalf("unable to sign channel update: %v", err)
  3081  	}
  3082  
  3083  	select {
  3084  	case err = <-ctx.gossiper.ProcessRemoteAnnouncement(chanUpdAnn, nodePeer):
  3085  	case <-time.After(2 * time.Second):
  3086  		t.Fatal("did not process remote announcement")
  3087  	}
  3088  	if err != nil {
  3089  		t.Fatalf("unable to process announcement: %v", err)
  3090  	}
  3091  }
  3092  
  3093  // TestSendChannelUpdateReliably ensures that the latest channel update for a
  3094  // channel is always sent upon the remote party reconnecting.
  3095  func TestSendChannelUpdateReliably(t *testing.T) {
  3096  	t.Parallel()
  3097  
  3098  	// We'll start by creating our test context and a batch of
  3099  	// announcements.
  3100  	ctx, cleanup, err := createTestCtx(proofMatureDelta)
  3101  	if err != nil {
  3102  		t.Fatalf("unable to create test context: %v", err)
  3103  	}
  3104  	defer cleanup()
  3105  
  3106  	batch, err := createLocalAnnouncements(0)
  3107  	if err != nil {
  3108  		t.Fatalf("can't generate announcements: %v", err)
  3109  	}
  3110  
  3111  	// We'll also parse the public key of the remote party from the
  3112  	// announcement batch.
  3113  	remoteKey, err := secp256k1.ParsePubKey(batch.nodeAnn2.NodeID[:])
  3114  	if err != nil {
  3115  		t.Fatalf("unable to parse pubkey: %v", err)
  3116  	}
  3117  
  3118  	// Set up a channel we can use to inspect messages sent by the
  3119  	// gossiper to the remote peer.
  3120  	sentToPeer := make(chan lnwire.Message, 1)
  3121  	remotePeer := &mockPeer{remoteKey, sentToPeer, ctx.gossiper.quit}
  3122  
  3123  	// Since we first wait to be notified of the peer before attempting to
  3124  	// send the message, we'll overwrite NotifyWhenOnline and
  3125  	// NotifyWhenOffline to instead give us access to the channel that will
  3126  	// receive the notification.
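        	// Capturing these channels lets the test drive the peer's
        	// connect and disconnect events deterministically rather than
        	// relying on a real connection manager.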
  3127  	notifyOnline := make(chan chan<- lnpeer.Peer, 1)
  3128  	ctx.gossiper.reliableSender.cfg.NotifyWhenOnline = func(_ [33]byte,
  3129  		peerChan chan<- lnpeer.Peer) {
  3130  
  3131  		notifyOnline <- peerChan
  3132  	}
  3133  	notifyOffline := make(chan chan struct{}, 1)
  3134  	ctx.gossiper.reliableSender.cfg.NotifyWhenOffline = func(
  3135  		_ [33]byte) <-chan struct{} {
  3136  
  3137  		c := make(chan struct{}, 1)
  3138  		notifyOffline <- c
  3139  		return c
  3140  	}
  3141  
  3142  	// assertMsgSent is a helper closure we'll use to determine if the
  3143  	// correct gossip message was sent.
  3144  	assertMsgSent := func(msg lnwire.Message) {
  3145  		t.Helper()
  3146  
  3147  		select {
  3148  		case msgSent := <-sentToPeer:
  3149  			assertMessage(t, msg, msgSent)
  3150  		case <-time.After(2 * time.Second):
  3151  			t.Fatalf("did not send %v message to peer",
  3152  				msg.MsgType())
  3153  		}
  3154  	}
  3155  
  3156  	// Process the channel announcement for which we'll send a channel
  3157  	// update.
  3158  	select {
  3159  	case err = <-ctx.gossiper.ProcessLocalAnnouncement(batch.chanAnn):
  3160  	case <-time.After(2 * time.Second):
  3161  		t.Fatal("did not process local channel announcement")
  3162  	}
  3163  	if err != nil {
  3164  		t.Fatalf("unable to process local channel announcement: %v", err)
  3165  	}
  3166  
  3167  	// It should not be broadcast due to not having an announcement proof.
  3168  	select {
  3169  	case <-ctx.broadcastedMessage:
  3170  		t.Fatal("channel announcement was broadcast")
  3171  	case <-time.After(2 * trickleDelay):
  3172  	}
  3173  
  3174  	// Now, we'll process the channel update.
  3175  	select {
  3176  	case err = <-ctx.gossiper.ProcessLocalAnnouncement(batch.chanUpdAnn1):
  3177  	case <-time.After(2 * time.Second):
  3178  		t.Fatal("did not process local channel update")
  3179  	}
  3180  	if err != nil {
  3181  		t.Fatalf("unable to process local channel update: %v", err)
  3182  	}
  3183  
  3184  	// It should also not be broadcast due to the announcement not having an
  3185  	// announcement proof.
  3186  	select {
  3187  	case <-ctx.broadcastedMessage:
  3188  		t.Fatal("channel announcement was broadcast")
  3189  	case <-time.After(2 * trickleDelay):
  3190  	}
  3191  
  3192  	// It should however send it to the peer directly. In order to do so,
  3193  	// it'll request a notification for when the peer is online.
  3194  	var peerChan chan<- lnpeer.Peer
  3195  	select {
  3196  	case peerChan = <-notifyOnline:
  3197  	case <-time.After(2 * time.Second):
  3198  		t.Fatal("gossiper did not request notification upon peer " +
  3199  			"connection")
  3200  	}
  3201  
  3202  	// We can go ahead and notify the peer, which should trigger the message
  3203  	// to be sent.
  3204  	peerChan <- remotePeer
  3205  	assertMsgSent(batch.chanUpdAnn1)
  3206  
  3207  	// The gossiper should now request a notification for when the peer
  3208  	// disconnects. We'll also trigger this now.
  3209  	var offlineChan chan struct{}
  3210  	select {
  3211  	case offlineChan = <-notifyOffline:
  3212  	case <-time.After(2 * time.Second):
  3213  		t.Fatal("gossiper did not request notification upon peer " +
  3214  			"disconnection")
  3215  	}
  3216  
  3217  	close(offlineChan)
  3218  
  3219  	// Since it's offline, the gossiper should request another notification
  3220  	// for when it comes back online.
  3221  	select {
  3222  	case peerChan = <-notifyOnline:
  3223  	case <-time.After(2 * time.Second):
  3224  		t.Fatal("gossiper did not request notification upon peer " +
  3225  			"connection")
  3226  	}
  3227  
  3228  	// Now that the remote peer is offline, we'll send a new channel update.
  3229  	batch.chanUpdAnn1.Timestamp++
  3230  	if err := signUpdate(selfKeyPriv, batch.chanUpdAnn1); err != nil {
  3231  		t.Fatalf("unable to sign new channel update: %v", err)
  3232  	}
  3233  
  3234  	// With the new update created, we'll go ahead and process it.
  3235  	select {
  3236  	case err = <-ctx.gossiper.ProcessLocalAnnouncement(
  3237  		batch.chanUpdAnn1,
  3238  	):
  3239  	case <-time.After(2 * time.Second):
  3240  		t.Fatal("did not process local channel update")
  3241  	}
  3242  	if err != nil {
  3243  		t.Fatalf("unable to process local channel update: %v", err)
  3244  	}
  3245  
  3246  	// It should also not be broadcast due to the announcement not having an
  3247  	// announcement proof.
  3248  	select {
  3249  	case <-ctx.broadcastedMessage:
  3250  		t.Fatal("channel announcement was broadcast")
  3251  	case <-time.After(2 * trickleDelay):
  3252  	}
  3253  
  3254  	// The message should not be sent since the peer remains offline.
  3255  	select {
  3256  	case msg := <-sentToPeer:
  3257  		t.Fatalf("received unexpected message: %v", spew.Sdump(msg))
  3258  	case <-time.After(time.Second):
  3259  	}
  3260  
  3261  	// Once again, we'll notify that the peer is online and ensure the new
  3262  	// channel update is received. This will also cause an offline
  3263  	// notification to be requested again.
  3264  	peerChan <- remotePeer
  3265  	assertMsgSent(batch.chanUpdAnn1)
  3266  
  3267  	select {
  3268  	case offlineChan = <-notifyOffline:
  3269  	case <-time.After(2 * time.Second):
  3270  		t.Fatal("gossiper did not request notification upon peer " +
  3271  			"disconnection")
  3272  	}
  3273  
  3274  	// We'll then exchange proofs with the remote peer in order to announce
  3275  	// the channel.
  3276  	select {
  3277  	case err = <-ctx.gossiper.ProcessLocalAnnouncement(
  3278  		batch.localProofAnn,
  3279  	):
  3280  	case <-time.After(2 * time.Second):
  3281  		t.Fatal("did not process local channel proof")
  3282  	}
  3283  	if err != nil {
  3284  		t.Fatalf("unable to process local channel proof: %v", err)
  3285  	}
  3286  
  3287  	// No messages should be broadcast as we don't have the full proof yet.
  3288  	select {
  3289  	case <-ctx.broadcastedMessage:
  3290  		t.Fatal("channel announcement was broadcast")
  3291  	case <-time.After(2 * trickleDelay):
  3292  	}
  3293  
  3294  	// However, our proof should be sent directly to the remote peer.
  3295  	assertMsgSent(batch.localProofAnn)
  3296  
  3297  	select {
  3298  	case err = <-ctx.gossiper.ProcessRemoteAnnouncement(
  3299  		batch.remoteProofAnn, remotePeer,
  3300  	):
  3301  	case <-time.After(2 * time.Second):
  3302  		t.Fatal("did not process remote channel proof")
  3303  	}
  3304  	if err != nil {
  3305  		t.Fatalf("unable to process remote channel proof: %v", err)
  3306  	}
  3307  
  3308  	// Now that we've constructed our full proof, we can assert that the
  3309  	// channel has been announced.
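        	// Two broadcast messages are expected here: the channel
        	// announcement itself and the channel update processed earlier.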
  3310  	for i := 0; i < 2; i++ {
  3311  		select {
  3312  		case <-ctx.broadcastedMessage:
  3313  		case <-time.After(2 * trickleDelay):
  3314  			t.Fatal("expected channel to be announced")
  3315  		}
  3316  	}
  3317  
  3318  	// With the channel announced, we'll generate a new channel update. This
  3319  	// one won't take the path of the reliable sender, as the channel has
  3320  	// already been announced. We'll keep track of the old message that is
  3321  	// now stale to use later on.
  3322  	staleChannelUpdate := batch.chanUpdAnn1
  3323  	newChannelUpdate := &lnwire.ChannelUpdate{}
  3324  	*newChannelUpdate = *staleChannelUpdate
  3325  	newChannelUpdate.Timestamp++
  3326  	if err := signUpdate(selfKeyPriv, newChannelUpdate); err != nil {
  3327  		t.Fatalf("unable to sign new channel update: %v", err)
  3328  	}
  3329  
  3330  	// Process the new channel update. It should not be sent to the peer
  3331  	// directly since the reliable sender only applies when the channel is
  3332  	// not announced.
  3333  	select {
  3334  	case err = <-ctx.gossiper.ProcessLocalAnnouncement(
  3335  		newChannelUpdate,
  3336  	):
  3337  	case <-time.After(2 * time.Second):
  3338  		t.Fatal("did not process local channel update")
  3339  	}
  3340  	if err != nil {
  3341  		t.Fatalf("unable to process local channel update: %v", err)
  3342  	}
  3343  	select {
  3344  	case <-ctx.broadcastedMessage:
  3345  	case <-time.After(2 * trickleDelay):
  3346  		t.Fatal("channel update was not broadcast")
  3347  	}
  3348  	select {
  3349  	case msg := <-sentToPeer:
  3350  		t.Fatalf("received unexpected message: %v", spew.Sdump(msg))
  3351  	case <-time.After(time.Second):
  3352  	}
  3353  
  3354  	// Then, we'll trigger the reliable sender to send its pending messages
  3355  	// by triggering an offline notification for the peer, followed by an
  3356  	// online one.
  3357  	close(offlineChan)
  3358  
  3359  	select {
  3360  	case peerChan = <-notifyOnline:
  3361  	case <-time.After(2 * time.Second):
  3362  		t.Fatal("gossiper did not request notification upon peer " +
  3363  			"connection")
  3364  	}
  3365  
  3366  	peerChan <- remotePeer
  3367  
  3368  	// At this point, we should have sent both the AnnounceSignatures and
  3369  	// stale ChannelUpdate.
  3370  	for i := 0; i < 2; i++ {
  3371  		var msg lnwire.Message
  3372  		select {
  3373  		case msg = <-sentToPeer:
  3374  		case <-time.After(time.Second):
  3375  			t.Fatal("expected to send message")
  3376  		}
  3377  
  3378  		switch msg := msg.(type) {
  3379  		case *lnwire.ChannelUpdate:
  3380  			assertMessage(t, staleChannelUpdate, msg)
  3381  		case *lnwire.AnnounceSignatures:
  3382  			assertMessage(t, batch.localProofAnn, msg)
  3383  		default:
  3384  			t.Fatalf("sent unexpected %v message", msg.MsgType())
  3385  		}
  3386  	}
  3387  
  3388  	// Since the messages above are now deemed stale, they should be
  3389  	// removed from the message store.
  3390  	err = wait.NoError(func() error {
  3391  		msgs, err := ctx.gossiper.cfg.MessageStore.Messages()
  3392  		if err != nil {
  3393  			return fmt.Errorf("unable to retrieve pending "+
  3394  				"messages: %v", err)
  3395  		}
  3396  		if len(msgs) != 0 {
  3397  			return fmt.Errorf("expected no messages left, found %d",
  3398  				len(msgs))
  3399  		}
  3400  		return nil
  3401  	}, time.Second)
  3402  	if err != nil {
  3403  		t.Fatal(err)
  3404  	}
  3405  }
  3406  
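        // sendLocalMsg processes a local announcement through the gossiper,
        // along with any optional message fields, and fails the test if it
        // isn't processed in time or returns an error.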
  3407  func sendLocalMsg(t *testing.T, ctx *testCtx, msg lnwire.Message,
  3408  	optionalMsgFields ...OptionalMsgField) {
  3409  
  3410  	t.Helper()
  3411  
  3412  	var err error
  3413  	select {
  3414  	case err = <-ctx.gossiper.ProcessLocalAnnouncement(
  3415  		msg, optionalMsgFields...,
  3416  	):
  3417  	case <-time.After(2 * time.Second):
  3418  		t.Fatal("did not process local announcement")
  3419  	}
  3420  	if err != nil {
  3421  		t.Fatalf("unable to process channel msg: %v", err)
  3422  	}
  3423  }
  3424  
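        // sendRemoteMsg processes an announcement from the given remote peer
        // through the gossiper and fails the test if it isn't processed in
        // time or returns an error.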
  3425  func sendRemoteMsg(t *testing.T, ctx *testCtx, msg lnwire.Message,
  3426  	remotePeer lnpeer.Peer) {
  3427  
  3428  	t.Helper()
  3429  
  3430  	select {
  3431  	case err := <-ctx.gossiper.ProcessRemoteAnnouncement(msg, remotePeer):
  3432  		if err != nil {
  3433  			t.Fatalf("unable to process channel msg: %v", err)
  3434  		}
  3435  	case <-time.After(2 * time.Second):
  3436  		t.Fatal("did not process remote announcement")
  3437  	}
  3438  }
  3439  
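        // assertBroadcastMsg asserts that a broadcast message satisfying the
        // given predicate is seen within the allotted time.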
  3440  func assertBroadcastMsg(t *testing.T, ctx *testCtx,
  3441  	predicate func(lnwire.Message) error) {
  3442  
  3443  	t.Helper()
  3444  
  3445  	// We don't care about the order of the broadcast, only that our target
  3446  	// predicate returns true for any of the messages, so we'll continue to
  3447  	// retry until either we hit our timeout, or it returns with no error
  3448  	// (message found).
  3449  	err := wait.NoError(func() error {
  3450  		select {
  3451  		case msg := <-ctx.broadcastedMessage:
  3452  			return predicate(msg.msg)
  3453  		case <-time.After(2 * trickleDelay):
  3454  			return fmt.Errorf("no message broadcast")
  3455  		}
  3456  	}, time.Second*5)
  3457  	if err != nil {
  3458  		t.Fatal(err)
  3459  	}
  3460  }
  3461  
  3462  // TestPropagateChanPolicyUpdate tests that we're able to issue requests to
  3463  // update policies for all channels and also select target channels.
  3464  // Additionally, we ensure that we don't propagate updates for any private
  3465  // channels.
  3466  func TestPropagateChanPolicyUpdate(t *testing.T) {
  3467  	t.Parallel()
  3468  
  3469  	// First, we'll make our test context and add 3 random channels to the
  3470  	// graph.
  3471  	startingHeight := uint32(10)
  3472  	ctx, cleanup, err := createTestCtx(startingHeight)
  3473  	if err != nil {
  3474  		t.Fatalf("unable to create test context: %v", err)
  3475  	}
  3476  	defer cleanup()
  3477  
  3478  	const numChannels = 3
  3479  	channelsToAnnounce := make([]*annBatch, 0, numChannels)
  3480  	for i := 0; i < numChannels; i++ {
  3481  		newChan, err := createLocalAnnouncements(uint32(i + 1))
  3482  		if err != nil {
  3483  			t.Fatalf("unable to make new channel ann: %v", err)
  3484  		}
  3485  
  3486  		channelsToAnnounce = append(channelsToAnnounce, newChan)
  3487  	}
  3488  
  3489  	remoteKey := remoteKeyPriv1.PubKey()
  3490  
  3491  	sentMsgs := make(chan lnwire.Message, 10)
  3492  	remotePeer := &mockPeer{remoteKey, sentMsgs, ctx.gossiper.quit}
  3493  
  3494  	// The forced code path for sending the private ChannelUpdate to the
  3495  	// remote peer will be hit, forcing it to request a notification that
  3496  	// the remote peer is active. We'll ensure that it targets the proper
  3497  	// pubkey, and hand it our mock peer above.
  3498  	notifyErr := make(chan error, 1)
  3499  	ctx.gossiper.reliableSender.cfg.NotifyWhenOnline = func(
  3500  		targetPub [33]byte, peerChan chan<- lnpeer.Peer) {
  3501  
  3502  		if !bytes.Equal(targetPub[:], remoteKey.SerializeCompressed()) {
  3503  			notifyErr <- fmt.Errorf("reliableSender attempted to send the "+
  3504  				"message to the wrong peer: expected %x got %x",
  3505  				remoteKey.SerializeCompressed(),
  3506  				targetPub)
  3507  		}
  3508  
  3509  		peerChan <- remotePeer
  3510  	}
  3511  
  3512  	// With our channel announcements created, we'll now send them all to
  3513  	// the gossiper in order for it to process. However, we'll hold back
  3514  	// the channel ann proof from the first channel in order to have it be
  3515  	// marked as a private channel.
  3516  	firstChanID := channelsToAnnounce[0].chanAnn.ShortChannelID
  3517  	for i, batch := range channelsToAnnounce {
  3518  		// channelPoint ensures that each channel policy in the map
  3519  		// returned by PropagateChanPolicyUpdate has a unique key. Since
  3520  		// the map is keyed by wire.OutPoint, we want to ensure that
  3521  		// each channel has a unique channel point.
  3522  		channelPoint := ChannelPoint(wire.OutPoint{Index: uint32(i)})
  3523  
  3524  		sendLocalMsg(t, ctx, batch.chanAnn, channelPoint)
  3525  		sendLocalMsg(t, ctx, batch.chanUpdAnn1)
  3526  		sendLocalMsg(t, ctx, batch.nodeAnn1)
  3527  
  3528  		sendRemoteMsg(t, ctx, batch.chanUpdAnn2, remotePeer)
  3529  		sendRemoteMsg(t, ctx, batch.nodeAnn2, remotePeer)
  3530  
  3531  		// We'll skip sending the auth proofs from the first channel to
  3532  		// ensure that it's seen as a private channel.
  3533  		if batch.chanAnn.ShortChannelID == firstChanID {
  3534  			continue
  3535  		}
  3536  
  3537  		sendLocalMsg(t, ctx, batch.localProofAnn)
  3538  		sendRemoteMsg(t, ctx, batch.remoteProofAnn, remotePeer)
  3539  	}
  3540  
  3541  	// Drain out any broadcast or direct messages we might not have read up
  3542  	// to this point. We'll also check our notifyErr channel to detect if
  3543  	// the reliable sender had an issue sending to the remote peer.
  3544  out:
  3545  	for {
  3546  		select {
  3547  		case <-ctx.broadcastedMessage:
  3548  		case <-sentMsgs:
  3549  		case err := <-notifyErr:
  3550  			t.Fatal(err)
  3551  		default:
  3552  			break out
  3553  		}
  3554  	}
  3555  
  3556  	// Now that all of our channels are loaded, we'll attempt to update the
  3557  	// policy of all of them.
  3558  	const newTimeLockDelta = 100
  3559  	var edgesToUpdate []EdgeWithInfo
  3560  	err = ctx.router.ForAllOutgoingChannels(func(
  3561  		_ kvdb.RTx,
  3562  		info *channeldb.ChannelEdgeInfo,
  3563  		edge *channeldb.ChannelEdgePolicy) error {
  3564  
  3565  		edge.TimeLockDelta = uint16(newTimeLockDelta)
  3566  		edgesToUpdate = append(edgesToUpdate, EdgeWithInfo{
  3567  			Info: info,
  3568  			Edge: edge,
  3569  		})
  3570  
  3571  		return nil
  3572  	})
  3573  	if err != nil {
  3574  		t.Fatal(err)
  3575  	}
  3576  
  3577  	err = ctx.gossiper.PropagateChanPolicyUpdate(edgesToUpdate)
  3578  	if err != nil {
  3579  		t.Fatalf("unable to update chan policies: %v", err)
  3580  	}
  3581  
  3582  	// Two channel updates should now be broadcast, with neither of them
  3583  	// being the update for our first, private channel.
  3584  	for i := 0; i < numChannels-1; i++ {
  3585  		assertBroadcastMsg(t, ctx, func(msg lnwire.Message) error {
  3586  			upd, ok := msg.(*lnwire.ChannelUpdate)
  3587  			if !ok {
  3588  				return fmt.Errorf("channel update not "+
  3589  					"broadcast, instead %T was", msg)
  3590  			}
  3591  
  3592  			if upd.ShortChannelID == firstChanID {
  3593  				return fmt.Errorf("private channel upd " +
  3594  					"broadcast")
  3595  			}
  3596  			if upd.TimeLockDelta != newTimeLockDelta {
  3597  				return fmt.Errorf("wrong delta: expected %v, "+
  3598  					"got %v", newTimeLockDelta,
  3599  					upd.TimeLockDelta)
  3600  			}
  3601  
  3602  			return nil
  3603  		})
  3604  	}
  3605  
  3606  	// Finally, the ChannelUpdate for the private channel should have been
  3607  	// sent directly to the remote peer via the reliable sender.
  3608  	select {
  3609  	case msg := <-sentMsgs:
  3610  		upd, ok := msg.(*lnwire.ChannelUpdate)
  3611  		if !ok {
  3612  			t.Fatalf("channel update not "+
  3613  				"sent to peer, instead %T was", msg)
  3614  		}
  3615  		if upd.TimeLockDelta != newTimeLockDelta {
  3616  			t.Fatalf("wrong delta: expected %v, "+
  3617  				"got %v", newTimeLockDelta,
  3618  				upd.TimeLockDelta)
  3619  		}
  3620  		if upd.ShortChannelID != firstChanID {
  3621  			t.Fatalf("expected update for private channel %v, "+
  3622  				"got %v", firstChanID, upd.ShortChannelID)
  3623  		}
  3624  	case <-time.After(time.Second * 5):
  3625  		t.Fatalf("message not sent directly to peer")
  3626  	}
  3627  
  3628  	// At this point, no other ChannelUpdate messages should be broadcast
  3629  	// as we sent the two public ones to the network, and the private one
  3630  	// was sent directly to the peer.
  3631  	for {
  3632  		select {
  3633  		case msg := <-ctx.broadcastedMessage:
  3634  			if upd, ok := msg.msg.(*lnwire.ChannelUpdate); ok {
  3635  				if upd.ShortChannelID == firstChanID {
  3636  					t.Fatalf("chan update msg received: %v",
  3637  						spew.Sdump(msg))
  3638  				}
  3639  			}
  3640  		default:
  3641  			return
  3642  		}
  3643  	}
  3644  }
  3645  
  3646  // TestProcessChannelAnnouncementOptionalMsgFields ensures that the gossiper
  3647  // can properly handle optional message fields provided by the caller when
  3648  // processing a channel announcement.
  3649  func TestProcessChannelAnnouncementOptionalMsgFields(t *testing.T) {
  3650  	t.Parallel()
  3651  
  3652  	// We'll start by creating our test context and a set of test channel
  3653  	// announcements.
  3654  	ctx, cleanup, err := createTestCtx(0)
  3655  	if err != nil {
  3656  		t.Fatalf("unable to create test context: %v", err)
  3657  	}
  3658  	defer cleanup()
  3659  
  3660  	chanAnn1 := createAnnouncementWithoutProof(
  3661  		100, selfKeyDesc.PubKey, remoteKeyPub1,
  3662  	)
  3663  	chanAnn2 := createAnnouncementWithoutProof(
  3664  		101, selfKeyDesc.PubKey, remoteKeyPub1,
  3665  	)
  3666  
  3667  	// assertOptionalMsgFields is a helper closure that ensures the optional
  3668  	// message fields were set as intended.
  3669  	assertOptionalMsgFields := func(chanID lnwire.ShortChannelID,
  3670  		capacity dcrutil.Amount, channelPoint wire.OutPoint) {
  3671  
  3672  		t.Helper()
  3673  
  3674  		edge, _, _, err := ctx.router.GetChannelByID(chanID)
  3675  		if err != nil {
  3676  			t.Fatalf("unable to get channel by id: %v", err)
  3677  		}
  3678  		if edge.Capacity != capacity {
  3679  			t.Fatalf("expected capacity %v, got %v", capacity,
  3680  				edge.Capacity)
  3681  		}
  3682  		if edge.ChannelPoint != channelPoint {
  3683  			t.Fatalf("expected channel point %v, got %v",
  3684  				channelPoint, edge.ChannelPoint)
  3685  		}
  3686  	}
  3687  
  3688  	// We'll process the first announcement without any optional fields. We
  3689  	// should see the channel's capacity and outpoint have a zero value.
  3690  	sendLocalMsg(t, ctx, chanAnn1)
  3691  	assertOptionalMsgFields(chanAnn1.ShortChannelID, 0, wire.OutPoint{})
  3692  
  3693  	// Providing the capacity and channel point as optional fields should
  3694  	// propagate them all the way down to the router.
  3695  	capacity := dcrutil.Amount(1000)
  3696  	channelPoint := wire.OutPoint{Index: 1}
  3697  	sendLocalMsg(
  3698  		t, ctx, chanAnn2, ChannelCapacity(capacity),
  3699  		ChannelPoint(channelPoint),
  3700  	)
  3701  	assertOptionalMsgFields(chanAnn2.ShortChannelID, capacity, channelPoint)
  3702  }
  3703  
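        // assertMessage fails the test if the received message does not
        // deeply equal the expected one.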
  3704  func assertMessage(t *testing.T, expected, got lnwire.Message) {
  3705  	t.Helper()
  3706  
  3707  	if !reflect.DeepEqual(expected, got) {
  3708  		t.Fatalf("expected: %v\ngot: %v", spew.Sdump(expected),
  3709  			spew.Sdump(got))
  3710  	}
  3711  }
  3712  
  3713  // TestSplitAnnouncementsCorrectSubBatches checks that we split announcement
  3714  // lists of various sizes into the correct number of sub batches.
  3715  func TestSplitAnnouncementsCorrectSubBatches(t *testing.T) {
  3716  	t.Parallel()
  3717  
  3718  	const subBatchSize = 10
  3719  
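        	// Each batch is expected to split into
        	// ceil(batchSize/subBatchSize) sub batches.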
  3720  	announcementBatchSizes := []int{2, 5, 20, 45, 80, 100, 1005}
  3721  	expectedNumberMiniBatches := []int{1, 1, 2, 5, 8, 10, 101}
  3722  
  3723  	lengthAnnouncementBatchSizes := len(announcementBatchSizes)
  3724  	lengthExpectedNumberMiniBatches := len(expectedNumberMiniBatches)
  3725  
  3726  	if lengthAnnouncementBatchSizes != lengthExpectedNumberMiniBatches {
  3727  		t.Fatal("Length of announcementBatchSizes and " +
  3728  			"expectedNumberMiniBatches should be equal")
  3729  	}
  3730  
  3731  	for testIndex := range announcementBatchSizes {
  3732  		var batchSize = announcementBatchSizes[testIndex]
  3733  		announcementBatch := make([]msgWithSenders, batchSize)
  3734  
  3735  		splitAnnouncementBatch := splitAnnouncementBatches(
  3736  			subBatchSize, announcementBatch,
  3737  		)
  3738  
  3739  		lengthMiniBatches := len(splitAnnouncementBatch)
  3740  
  3741  		if lengthMiniBatches != expectedNumberMiniBatches[testIndex] {
  3742  			t.Fatalf("Expecting %d mini batches, actual %d",
  3743  				expectedNumberMiniBatches[testIndex], lengthMiniBatches)
  3744  		}
  3745  	}
  3746  }
  3747  
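        // assertCorrectSubBatchSize fails the test if the actual sub batch
        // size does not match the expected one.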
  3748  func assertCorrectSubBatchSize(t *testing.T, expectedSubBatchSize,
  3749  	actualSubBatchSize int) {
  3750  
  3751  	t.Helper()
  3752  
  3753  	if actualSubBatchSize != expectedSubBatchSize {
  3754  		t.Fatalf("Expecting subBatch size of %d, actual %d",
  3755  			expectedSubBatchSize, actualSubBatchSize)
  3756  	}
  3757  }
  3758  
  3759  // TestCalculateCorrectSubBatchSizes checks that we calculate the correct
  3760  // sub batch size for each of the input vectors of batch sizes.
  3761  func TestCalculateCorrectSubBatchSizes(t *testing.T) {
  3762  	t.Parallel()
  3763  
  3764  	const minimumSubBatchSize = 10
  3765  	const batchDelay = time.Duration(100)
  3766  	const subBatchDelay = time.Duration(10)
  3767  
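        	// The expected sub batch size is approximately
        	// batchSize*subBatchDelay/batchDelay (rounded up), but never
        	// smaller than minimumSubBatchSize.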
  3768  	batchSizes := []int{2, 200, 250, 305, 352, 10010, 1000001}
  3769  	expectedSubBatchSize := []int{10, 20, 25, 31, 36, 1001, 100001}
  3770  
  3771  	for testIndex := range batchSizes {
  3772  		batchSize := batchSizes[testIndex]
  3773  		expectedBatchSize := expectedSubBatchSize[testIndex]
  3774  
  3775  		actualSubBatchSize := calculateSubBatchSize(
  3776  			batchDelay, subBatchDelay, minimumSubBatchSize, batchSize,
  3777  		)
  3778  
  3779  		assertCorrectSubBatchSize(t, expectedBatchSize, actualSubBatchSize)
  3780  	}
  3781  }
  3782  
  3783  // TestCalculateCorrectSubBatchSizesDifferentDelay checks that we calculate
  3784  // the correct sub batch size for each of the different batch delays.
  3785  func TestCalculateCorrectSubBatchSizesDifferentDelay(t *testing.T) {
  3786  	t.Parallel()
  3787  
  3788  	const batchSize = 100
  3789  	const minimumSubBatchSize = 10
  3790  
  3791  	batchDelays := []time.Duration{100, 50, 20, 25, 5, 0}
  3792  	const subBatchDelay = 10
  3793  
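        	// Smaller batch delays allow for larger sub batches; the result
        	// is bounded below by minimumSubBatchSize and above by the
        	// total batch size.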
  3794  	expectedSubBatchSize := []int{10, 20, 50, 40, 100, 100}
  3795  
  3796  	for testIndex := range batchDelays {
  3797  		batchDelay := batchDelays[testIndex]
  3798  		expectedBatchSize := expectedSubBatchSize[testIndex]
  3799  
  3800  		actualSubBatchSize := calculateSubBatchSize(
  3801  			batchDelay, subBatchDelay, minimumSubBatchSize, batchSize,
  3802  		)
  3803  
  3804  		assertCorrectSubBatchSize(t, expectedBatchSize, actualSubBatchSize)
  3805  	}
  3806  }
  3807  
  3808  // markGraphSyncing allows us to report that the initial historical sync has
  3809  // not yet completed.
  3810  func (m *SyncManager) markGraphSyncing() {
  3811  	atomic.StoreInt32(&m.initialHistoricalSyncCompleted, 0)
  3812  }
  3813  
  3814  // TestBroadcastAnnsAfterGraphSynced ensures that we only broadcast
  3815  // announcements after the graph has been considered synced, i.e., after our
  3816  // initial historical sync has completed.
  3817  func TestBroadcastAnnsAfterGraphSynced(t *testing.T) {
  3818  	t.Parallel()
  3819  
  3820  	ctx, cleanup, err := createTestCtx(10)
  3821  	if err != nil {
  3822  		t.Fatalf("can't create context: %v", err)
  3823  	}
  3824  	defer cleanup()
  3825  
  3826  	// We'll mark the graph as not synced. This should prevent us from
  3827  	// broadcasting any messages we've received as part of our initial
  3828  	// historical sync.
  3829  	ctx.gossiper.syncMgr.markGraphSyncing()
  3830  
  3831  	assertBroadcast := func(msg lnwire.Message, isRemote bool,
  3832  		shouldBroadcast bool) {
  3833  
  3834  		t.Helper()
  3835  
  3836  		nodePeer := &mockPeer{remoteKeyPriv1.PubKey(), nil, nil}
  3837  		var errChan chan error
  3838  		if isRemote {
  3839  			errChan = ctx.gossiper.ProcessRemoteAnnouncement(
  3840  				msg, nodePeer,
  3841  			)
  3842  		} else {
  3843  			errChan = ctx.gossiper.ProcessLocalAnnouncement(msg)
  3844  		}
  3845  
  3846  		select {
  3847  		case err := <-errChan:
  3848  			if err != nil {
  3849  				t.Fatalf("unable to process gossip message: %v",
  3850  					err)
  3851  			}
  3852  		case <-time.After(2 * time.Second):
  3853  			t.Fatal("gossip message not processed")
  3854  		}
  3855  
  3856  		select {
  3857  		case <-ctx.broadcastedMessage:
  3858  			if !shouldBroadcast {
  3859  				t.Fatal("gossip message was broadcast")
  3860  			}
  3861  		case <-time.After(2 * trickleDelay):
  3862  			if shouldBroadcast {
  3863  				t.Fatal("gossip message wasn't broadcast")
  3864  			}
  3865  		}
  3866  	}
  3867  
  3868  	// A remote channel announcement should not be broadcast since the graph
  3869  	// has not yet been synced.
  3870  	chanAnn1, err := createRemoteChannelAnnouncement(0)
  3871  	if err != nil {
  3872  		t.Fatalf("unable to create channel announcement: %v", err)
  3873  	}
  3874  	assertBroadcast(chanAnn1, true, false)
  3875  
  3876  	// A local channel update should be broadcast though, regardless of
  3877  	// whether we've synced our graph or not.
  3878  	chanUpd, err := createUpdateAnnouncement(0, 0, remoteKeyPriv1, 1)
  3879  	if err != nil {
  3880  		t.Fatalf("unable to create channel update: %v", err)
  3881  	}
  3882  	assertBroadcast(chanUpd, false, true)
  3883  
  3884  	// Mark the graph as synced, which should allow the channel
  3885  	// announcement to be broadcast.
  3886  	ctx.gossiper.syncMgr.markGraphSynced()
  3887  
  3888  	chanAnn2, err := createRemoteChannelAnnouncement(1)
  3889  	if err != nil {
  3890  		t.Fatalf("unable to create channel announcement: %v", err)
  3891  	}
  3892  	assertBroadcast(chanAnn2, true, true)
  3893  }
  3894  
  3895  // TestRateLimitChannelUpdates ensures that we properly rate limit incoming
  3896  // channel updates.
  3897  func TestRateLimitChannelUpdates(t *testing.T) {
  3898  	t.Parallel()
  3899  
  3900  	// Create our test harness.
  3901  	const blockHeight = 100
  3902  	ctx, cleanup, err := createTestCtx(blockHeight)
  3903  	if err != nil {
  3904  		t.Fatalf("can't create context: %v", err)
  3905  	}
  3906  	defer cleanup()
  3907  	ctx.gossiper.cfg.RebroadcastInterval = time.Hour
  3908  	ctx.gossiper.cfg.MaxChannelUpdateBurst = 5
  3909  	ctx.gossiper.cfg.ChannelUpdateInterval = 5 * time.Second
  3910  
  3911  	// The graph should start empty.
  3912  	require.Empty(t, ctx.router.infos)
  3913  	require.Empty(t, ctx.router.edges)
  3914  
  3915  	// We'll create a batch of signed announcements, including updates for
  3916  	// both sides, for a channel and process them. They should all be
  3917  	// forwarded as this is our first time learning about the channel.
  3918  	batch, err := createRemoteAnnouncements(blockHeight)
  3919  	require.NoError(t, err)
  3920  
  3921  	nodePeer1 := &mockPeer{remoteKeyPriv1.PubKey(), nil, nil}
  3922  	select {
  3923  	case err := <-ctx.gossiper.ProcessRemoteAnnouncement(
  3924  		batch.chanAnn, nodePeer1,
  3925  	):
  3926  		require.NoError(t, err)
  3927  	case <-time.After(time.Second):
  3928  		t.Fatal("remote announcement not processed")
  3929  	}
  3930  
  3931  	select {
  3932  	case err := <-ctx.gossiper.ProcessRemoteAnnouncement(
  3933  		batch.chanUpdAnn1, nodePeer1,
  3934  	):
  3935  		require.NoError(t, err)
  3936  	case <-time.After(time.Second):
  3937  		t.Fatal("remote announcement not processed")
  3938  	}
  3939  
  3940  	nodePeer2 := &mockPeer{remoteKeyPriv2.PubKey(), nil, nil}
  3941  	select {
  3942  	case err := <-ctx.gossiper.ProcessRemoteAnnouncement(
  3943  		batch.chanUpdAnn2, nodePeer2,
  3944  	):
  3945  		require.NoError(t, err)
  3946  	case <-time.After(time.Second):
  3947  		t.Fatal("remote announcement not processed")
  3948  	}
  3949  
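        	// All three messages should be broadcast: the channel
        	// announcement and one update for each direction.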
  3950  	timeout := time.After(2 * trickleDelay)
  3951  	for i := 0; i < 3; i++ {
  3952  		select {
  3953  		case <-ctx.broadcastedMessage:
  3954  		case <-timeout:
  3955  			t.Fatal("expected announcement to be broadcast")
  3956  		}
  3957  	}
  3958  
  3959  	shortChanID := batch.chanAnn.ShortChannelID.ToUint64()
  3960  	require.Contains(t, ctx.router.infos, shortChanID)
  3961  	require.Contains(t, ctx.router.edges, shortChanID)
  3962  
  3963  	// We'll define a helper to assert whether updates should be rate
  3964  	// limited or not depending on their contents.
  3965  	assertRateLimit := func(update *lnwire.ChannelUpdate, peer lnpeer.Peer,
  3966  		shouldRateLimit bool) {
  3967  
  3968  		t.Helper()
  3969  
  3970  		select {
  3971  		case err := <-ctx.gossiper.ProcessRemoteAnnouncement(update, peer):
  3972  			require.NoError(t, err)
  3973  		case <-time.After(time.Second):
  3974  			t.Fatal("remote announcement not processed")
  3975  		}
  3976  
  3977  		select {
  3978  		case <-ctx.broadcastedMessage:
  3979  			if shouldRateLimit {
  3980  				t.Fatal("unexpected channel update broadcast")
  3981  			}
  3982  		case <-time.After(2 * trickleDelay):
  3983  			if !shouldRateLimit {
  3984  				t.Fatal("expected channel update broadcast")
  3985  			}
  3986  		}
  3987  	}
  3988  
  3989  	// We'll start with the keep alive case.
  3990  	//
  3991  	// We rate limit any keep alive updates that have not at least spanned
  3992  	// our rebroadcast interval.
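        	//
        	// A keep alive update bumps only the timestamp, leaving every
        	// policy field unchanged.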
  3993  	rateLimitKeepAliveUpdate := *batch.chanUpdAnn1
  3994  	rateLimitKeepAliveUpdate.Timestamp++
  3995  	require.NoError(t, signUpdate(remoteKeyPriv1, &rateLimitKeepAliveUpdate))
  3996  	assertRateLimit(&rateLimitKeepAliveUpdate, nodePeer1, true)
  3997  
  3998  	keepAliveUpdate := *batch.chanUpdAnn1
  3999  	keepAliveUpdate.Timestamp = uint32(
  4000  		time.Unix(int64(batch.chanUpdAnn1.Timestamp), 0).
  4001  			Add(ctx.gossiper.cfg.RebroadcastInterval).Unix(),
  4002  	)
  4003  	require.NoError(t, signUpdate(remoteKeyPriv1, &keepAliveUpdate))
  4004  	assertRateLimit(&keepAliveUpdate, nodePeer1, false)
  4005  
  4006  	// Then, we'll move on to the non keep alive cases.
  4007  	//
  4008  	// For this test, non keep alive updates are rate limited to one per 5
  4009  	// seconds with a max burst of 5 per direction. We'll process the max
  4010  	// burst of one direction first. None of these should be rate limited.
  4011  	updateSameDirection := keepAliveUpdate
  4012  	for i := uint32(0); i < uint32(ctx.gossiper.cfg.MaxChannelUpdateBurst); i++ {
  4013  		updateSameDirection.Timestamp++
  4014  		updateSameDirection.BaseFee++
  4015  		require.NoError(t, signUpdate(remoteKeyPriv1, &updateSameDirection))
  4016  		assertRateLimit(&updateSameDirection, nodePeer1, false)
  4017  	}
  4018  
  4019  	// Following with another update should be rate limited as the max burst
  4020  	// has been reached and we haven't ticked at the next interval yet.
  4021  	updateSameDirection.Timestamp++
  4022  	updateSameDirection.BaseFee++
  4023  	require.NoError(t, signUpdate(remoteKeyPriv1, &updateSameDirection))
  4024  	assertRateLimit(&updateSameDirection, nodePeer1, true)
  4025  
  4026  	// An update for the other direction should not be rate limited.
  4027  	updateDiffDirection := *batch.chanUpdAnn2
  4028  	updateDiffDirection.Timestamp++
  4029  	updateDiffDirection.BaseFee++
  4030  	require.NoError(t, signUpdate(remoteKeyPriv2, &updateDiffDirection))
  4031  	assertRateLimit(&updateDiffDirection, nodePeer2, false)
  4032  
  4033  	// Wait for the next interval to tick. Since we've only waited for one,
  4034  	// only one more update is allowed.
  4035  	<-time.After(ctx.gossiper.cfg.ChannelUpdateInterval)
  4036  	for i := 0; i < ctx.gossiper.cfg.MaxChannelUpdateBurst; i++ {
  4037  		updateSameDirection.Timestamp++
  4038  		updateSameDirection.BaseFee++
  4039  		require.NoError(t, signUpdate(remoteKeyPriv1, &updateSameDirection))
  4040  
  4041  		shouldRateLimit := i != 0
  4042  		assertRateLimit(&updateSameDirection, nodePeer1, shouldRateLimit)
  4043  	}
  4044  }
  4045  
  4046  // TestIgnoreOwnAnnouncement tests that the gossiper will ignore announcements
  4047  // about our own channels when coming from a remote peer.
  4048  func TestIgnoreOwnAnnouncement(t *testing.T) {
  4049  	t.Parallel()
  4050  
  4051  	ctx, cleanup, err := createTestCtx(proofMatureDelta)
  4052  	if err != nil {
  4053  		t.Fatalf("can't create context: %v", err)
  4054  	}
  4055  	defer cleanup()
  4056  
  4057  	batch, err := createLocalAnnouncements(0)
  4058  	if err != nil {
  4059  		t.Fatalf("can't generate announcements: %v", err)
  4060  	}
  4061  
  4062  	remoteKey, err := secp256k1.ParsePubKey(batch.nodeAnn2.NodeID[:])
  4063  	if err != nil {
  4064  		t.Fatalf("unable to parse pubkey: %v", err)
  4065  	}
  4066  	remotePeer := &mockPeer{remoteKey, nil, nil}
  4067  
  4068  	// Try to let the remote peer tell us about the channel we are part of.
  4069  	select {
  4070  	case err = <-ctx.gossiper.ProcessRemoteAnnouncement(
  4071  		batch.chanAnn, remotePeer,
  4072  	):
  4073  	case <-time.After(2 * time.Second):
  4074  		t.Fatal("did not process remote announcement")
  4075  	}
  4076  	// It should be ignored, since the gossiper only cares about local
  4077  	// announcements for its own channels.
  4078  	if err == nil || !strings.Contains(err.Error(), "ignoring") {
  4079  		t.Fatalf("expected gossiper to ignore announcement, got: %v", err)
  4080  	}
  4081  
  4082  	// Now do the local channel announcement, node announcement, and
  4083  	// channel update. No messages should be broadcast yet, since we don't
  4084  	// have the announcement signatures.
  4085  	select {
  4086  	case err = <-ctx.gossiper.ProcessLocalAnnouncement(batch.chanAnn):
  4087  	case <-time.After(2 * time.Second):
  4088  		t.Fatal("did not process local announcement")
  4089  	}
  4090  	if err != nil {
  4091  		t.Fatalf("unable to process channel ann: %v", err)
  4092  	}
  4093  	select {
  4094  	case <-ctx.broadcastedMessage:
  4095  		t.Fatal("channel announcement was broadcast")
  4096  	case <-time.After(2 * trickleDelay):
  4097  	}
  4098  
  4099  	select {
  4100  	case err = <-ctx.gossiper.ProcessLocalAnnouncement(batch.chanUpdAnn1):
  4101  	case <-time.After(2 * time.Second):
  4102  		t.Fatal("did not process local announcement")
  4103  	}
  4104  	if err != nil {
  4105  		t.Fatalf("unable to process channel update: %v", err)
  4106  	}
  4107  	select {
  4108  	case <-ctx.broadcastedMessage:
  4109  		t.Fatal("channel update announcement was broadcast")
  4110  	case <-time.After(2 * trickleDelay):
  4111  	}
  4112  
  4113  	select {
  4114  	case err = <-ctx.gossiper.ProcessLocalAnnouncement(batch.nodeAnn1):
  4115  	case <-time.After(2 * time.Second):
  4116  		t.Fatal("did not process local announcement")
  4117  	}
  4118  	if err != nil {
  4119  		t.Fatalf("unable to process node ann: %v", err)
  4120  	}
  4121  	select {
  4122  	case <-ctx.broadcastedMessage:
  4123  		t.Fatal("node announcement was broadcast")
  4124  	case <-time.After(2 * trickleDelay):
  4125  	}
  4126  
  4127  	// We should accept the remote's channel update and node announcement.
  4128  	select {
  4129  	case err = <-ctx.gossiper.ProcessRemoteAnnouncement(
  4130  		batch.chanUpdAnn2, remotePeer,
  4131  	):
  4132  	case <-time.After(2 * time.Second):
  4133  		t.Fatal("did not process remote announcement")
  4134  	}
  4135  	if err != nil {
  4136  		t.Fatalf("unable to process channel update: %v", err)
  4137  	}
  4138  	select {
  4139  	case <-ctx.broadcastedMessage:
  4140  		t.Fatal("channel update announcement was broadcast")
  4141  	case <-time.After(2 * trickleDelay):
  4142  	}
  4143  
  4144  	select {
  4145  	case err = <-ctx.gossiper.ProcessRemoteAnnouncement(
  4146  		batch.nodeAnn2, remotePeer,
  4147  	):
  4148  	case <-time.After(2 * time.Second):
  4149  		t.Fatal("did not process remote announcement")
  4150  	}
  4151  	if err != nil {
  4152  		t.Fatalf("unable to process node ann: %v", err)
  4153  	}
  4154  	select {
  4155  	case <-ctx.broadcastedMessage:
  4156  		t.Fatal("node announcement was broadcast")
  4157  	case <-time.After(2 * trickleDelay):
  4158  	}
  4159  
  4160  	// Now we exchange the proofs, the messages will be broadcasted to the
  4161  	// network.
  4162  	select {
  4163  	case err = <-ctx.gossiper.ProcessLocalAnnouncement(batch.localProofAnn):
  4164  	case <-time.After(2 * time.Second):
  4165  		t.Fatal("did not process local announcement")
  4166  	}
  4167  	if err != nil {
  4168  		t.Fatalf("unable to process local proof: %v", err)
  4169  	}
  4170  
  4171  	select {
  4172  	case <-ctx.broadcastedMessage:
  4173  		t.Fatal("announcements were broadcast")
  4174  	case <-time.After(2 * trickleDelay):
  4175  	}
  4176  
  4177  	select {
  4178  	case err = <-ctx.gossiper.ProcessRemoteAnnouncement(
  4179  		batch.remoteProofAnn, remotePeer,
  4180  	):
  4181  	case <-time.After(2 * time.Second):
  4182  		t.Fatal("did not process remote announcement")
  4183  	}
  4184  	if err != nil {
  4185  		t.Fatalf("unable to process remote proof: %v", err)
  4186  	}
  4187  
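        	// All five messages should now be broadcast: the channel
        	// announcement, both channel updates, and both node
        	// announcements.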
  4188  	for i := 0; i < 5; i++ {
  4189  		select {
  4190  		case <-ctx.broadcastedMessage:
  4191  		case <-time.After(time.Second):
  4192  			t.Fatal("announcement wasn't broadcast")
  4193  		}
  4194  	}
  4195  
  4196  	// Finally, we again check that we'll ignore the remote giving us
  4197  	// announcements about our own channel.
  4198  	select {
  4199  	case err = <-ctx.gossiper.ProcessRemoteAnnouncement(
  4200  		batch.chanAnn, remotePeer,
  4201  	):
  4202  	case <-time.After(2 * time.Second):
  4203  		t.Fatal("did not process remote announcement")
  4204  	}
  4205  	if err == nil || !strings.Contains(err.Error(), "ignoring") {
  4206  		t.Fatalf("expected gossiper to ignore announcement, got: %v", err)
  4207  	}
  4208  }
  4209  
  4210  // TestRejectCacheChannelAnn checks that if we reject a channel announcement,
  4211  // any subsequent attempt to validate it will be rejected with the proper
  4212  // error.
  4213  func TestRejectCacheChannelAnn(t *testing.T) {
  4214  	t.Parallel()
  4215  
  4216  	ctx, cleanup, err := createTestCtx(proofMatureDelta)
  4217  	if err != nil {
  4218  		t.Fatalf("can't create context: %v", err)
  4219  	}
  4220  	defer cleanup()
  4221  
  4222  	// First, we create a channel announcement to send over to our test
  4223  	// peer.
  4224  	batch, err := createRemoteAnnouncements(0)
  4225  	if err != nil {
  4226  		t.Fatalf("can't generate announcements: %v", err)
  4227  	}
  4228  
  4229  	remoteKey, err := secp256k1.ParsePubKey(batch.nodeAnn2.NodeID[:])
  4230  	if err != nil {
  4231  		t.Fatalf("unable to parse pubkey: %v", err)
  4232  	}
  4233  	remotePeer := &mockPeer{remoteKey, nil, nil}
  4234  
  4235  	// Before sending over the announcement, we'll configure the router to
  4236  	// always fail validating it.
  4237  	chanID := batch.chanAnn.ShortChannelID.ToUint64()
  4238  	ctx.router.queueValidationFail(chanID)
  4239  
  4240  	// If we process the announcement the first time we should get an error.
  4241  	select {
  4242  	case err = <-ctx.gossiper.ProcessRemoteAnnouncement(
  4243  		batch.chanAnn, remotePeer,
  4244  	):
  4245  		require.NotNil(t, err)
  4246  	case <-time.After(2 * time.Second):
  4247  		t.Fatal("did not process remote announcement")
  4248  	}
  4249  
  4250  	// If we process it a *second* time, then we should get an error saying
  4251  	// we rejected it already.
  4252  	select {
  4253  	case err = <-ctx.gossiper.ProcessRemoteAnnouncement(
  4254  		batch.chanAnn, remotePeer,
  4255  	):
  4256  		errStr := err.Error()
  4257  		require.Contains(t, errStr, "recently rejected")
  4258  	case <-time.After(2 * time.Second):
  4259  		t.Fatal("did not process remote announcement")
  4260  	}
  4261  }