github.com/decred/dcrlnd@v0.7.6/discovery/gossiper.go

     1  package discovery
     2  
     3  import (
     4  	"bytes"
     5  	"errors"
     6  	"fmt"
     7  	"sync"
     8  	"time"
     9  
    10  	"github.com/davecgh/go-spew/spew"
    11  	"github.com/decred/dcrd/chaincfg/chainhash"
    12  	"github.com/decred/dcrd/dcrec/secp256k1/v4"
    13  	"github.com/decred/dcrd/dcrutil/v4"
    14  	"github.com/decred/dcrd/wire"
    15  	"golang.org/x/time/rate"
    16  
    17  	"github.com/decred/dcrlnd/batch"
    18  	"github.com/decred/dcrlnd/chainntnfs"
    19  	"github.com/decred/dcrlnd/channeldb"
    20  	"github.com/decred/dcrlnd/keychain"
    21  	"github.com/decred/dcrlnd/kvdb"
    22  	"github.com/decred/dcrlnd/lnpeer"
    23  	"github.com/decred/dcrlnd/lnwallet"
    24  	"github.com/decred/dcrlnd/lnwire"
    25  	"github.com/decred/dcrlnd/multimutex"
    26  	"github.com/decred/dcrlnd/netann"
    27  	cache "github.com/decred/dcrlnd/neutrinocache"
    28  	"github.com/decred/dcrlnd/neutrinocache/lru"
    29  	"github.com/decred/dcrlnd/routing"
    30  	"github.com/decred/dcrlnd/routing/route"
    31  	"github.com/decred/dcrlnd/ticker"
    32  )
    33  
    34  const (
    35  	// DefaultMaxChannelUpdateBurst is the default maximum number of updates
    36  	// for a specific channel and direction that we'll accept over an
    37  	// interval.
    38  	DefaultMaxChannelUpdateBurst = 10
    39  
    40  	// DefaultChannelUpdateInterval is the default interval we'll use to
    41  	// determine how often we should allow a new update for a specific
    42  	// channel and direction.
    43  	DefaultChannelUpdateInterval = time.Minute
    44  
    45  	// maxPrematureUpdates tracks the max amount of premature channel
    46  	// updates that we'll hold onto.
    47  	maxPrematureUpdates = 100
    48  
    49  	// maxRejectedUpdates tracks the max amount of rejected channel updates
    50  	// we'll maintain. This is the global size across all peers. We'll
    51  	// allocate ~3 MB max to the cache.
    52  	maxRejectedUpdates = 10_000
    53  )
    54  
    55  var (
    56  	// ErrGossiperShuttingDown is an error that is returned if the gossiper
    57  	// is in the process of being shut down.
    58  	ErrGossiperShuttingDown = errors.New("gossiper is shutting down")
    59  
    60  	// ErrGossipSyncerNotFound signals that we were unable to find an active
    61  	// gossip syncer corresponding to a gossip query message received from
    62  	// the remote peer.
    63  	ErrGossipSyncerNotFound = errors.New("gossip syncer not found")
    64  
    65  	// emptyPubkey is used to compare compressed pubkeys against an empty
    66  	// byte array.
    67  	emptyPubkey [33]byte
    68  )
    69  
    70  // optionalMsgFields is a set of optional message fields that external callers
    71  // can provide that are useful when processing a specific network
    72  // announcement.
    73  type optionalMsgFields struct {
    74  	capacity     *dcrutil.Amount
    75  	channelPoint *wire.OutPoint
    76  }
    77  
    78  // apply applies the optional fields within the functional options.
    79  func (f *optionalMsgFields) apply(optionalMsgFields ...OptionalMsgField) {
    80  	for _, optionalMsgField := range optionalMsgFields {
    81  		optionalMsgField(f)
    82  	}
    83  }
    84  
    85  // OptionalMsgField is a functional option parameter that can be used to provide
    86  // external information that is not included within a network message but is
    87  // useful when processing it.
    88  type OptionalMsgField func(*optionalMsgFields)
    89  
    90  // ChannelCapacity is an optional field that lets the gossiper know of the
    91  // capacity of a channel.
    92  func ChannelCapacity(capacity dcrutil.Amount) OptionalMsgField {
    93  	return func(f *optionalMsgFields) {
    94  		f.capacity = &capacity
    95  	}
    96  }
    97  
    98  // ChannelPoint is an optional field that lets the gossiper know of the outpoint
    99  // of a channel.
   100  func ChannelPoint(op wire.OutPoint) OptionalMsgField {
   101  	return func(f *optionalMsgFields) {
   102  		f.channelPoint = &op
   103  	}
   104  }
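// The two options above follow the functional-option pattern:
// ProcessLocalAnnouncement applies whatever options the caller passes onto an
// optionalMsgFields value before the message enters the network handler. A
// minimal sketch of that application step (the amount and outpoint are
// hypothetical placeholders):
//
//	fields := &optionalMsgFields{}
//	fields.apply(
//		ChannelCapacity(dcrutil.Amount(1_000_000)),
//		ChannelPoint(wire.OutPoint{Index: 0}),
//	)
//	// fields.capacity and fields.channelPoint now carry the caller's values.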
   105  
   106  // networkMsg couples a routing related wire message with the peer that
   107  // originally sent it.
   108  type networkMsg struct {
   109  	peer              lnpeer.Peer
   110  	source            *secp256k1.PublicKey
   111  	msg               lnwire.Message
   112  	optionalMsgFields *optionalMsgFields
   113  
   114  	isRemote bool
   115  
   116  	err chan error
   117  }
   118  
   119  // chanPolicyUpdateRequest is a request that is sent to the server when a caller
   120  // wishes to update a particular set of channels. New ChannelUpdate messages
   121  // will be crafted to be sent out during the next broadcast epoch and the fee
   122  // updates committed to the lower layer.
   123  type chanPolicyUpdateRequest struct {
   124  	edgesToUpdate []EdgeWithInfo
   125  	errChan       chan error
   126  }
   127  
   128  // PinnedSyncers is a set of node pubkeys for which we will maintain an active
   129  // syncer at all times.
   130  type PinnedSyncers map[route.Vertex]struct{}
   131  
   132  // Config defines the configuration for the service. ALL elements within the
   133  // configuration MUST be non-nil for the service to carry out its duties.
   134  type Config struct {
   135  	// ChainHash is a hash that indicates the resident chain of the
   136  	// AuthenticatedGossiper. Any announcements that don't match this
   137  	// chain hash will be ignored.
   138  	//
   139  	// TODO(roasbeef): eventually make into map so can de-multiplex
   140  	// incoming announcements
   141  	//   * also need to do same for Notifier
   142  	ChainHash chainhash.Hash
   143  
   144  	// Router is the subsystem which is responsible for managing the
   145  	// topology of the Lightning Network. After incoming channel, node, and
   146  	// channel update announcements are validated, they are sent to the
   147  	// router in order to be included in the LN graph.
   148  	Router routing.ChannelGraphSource
   149  
   150  	// ChanSeries is an interface that provides access to a time series
   151  	// view of the current known channel graph. Each GossipSyncer enabled
   152  	// peer will utilize this in order to create and respond to channel
   153  	// graph time series queries.
   154  	ChanSeries ChannelGraphTimeSeries
   155  
   156  	// GossiperState is an interface that provides functions to persist
   157  	// data about the state of individual peer gossipers.
   158  	GossiperState GossiperState
   159  
   160  	// Notifier is used for receiving notifications of incoming blocks.
   161  	// With each new incoming block found we process previously premature
   162  	// announcements.
   163  	//
   164  	// TODO(roasbeef): could possibly just replace this with an epoch
   165  	// channel.
   166  	Notifier chainntnfs.ChainNotifier
   167  
   168  	// Broadcast broadcasts a particular set of announcements to all peers
   169  	// that the daemon is connected to. If supplied, the exclude parameter
   170  	// indicates that the target peer should be excluded from the
   171  	// broadcast.
   172  	Broadcast func(skips map[route.Vertex]struct{},
   173  		msg ...lnwire.Message) error
   174  
   175  	// NotifyWhenOnline is a function that allows the gossiper to be
   176  	// notified when a certain peer comes online, allowing it to retry
   177  	// sending a peer message.
   178  	//
   179  	// NOTE: The peerChan channel must be buffered.
   180  	NotifyWhenOnline func(peerPubKey [33]byte, peerChan chan<- lnpeer.Peer)
   181  
   182  	// NotifyWhenOffline is a function that allows the gossiper to be
   183  	// notified when a certain peer disconnects, allowing it to request a
   184  	// notification for when it reconnects.
   185  	NotifyWhenOffline func(peerPubKey [33]byte) <-chan struct{}
   186  
   187  	// SelfNodeAnnouncement is a function that fetches our own current node
   188  	// announcement, for use when determining whether we should update our
   189  	// peers about our presence on the network. If refresh is true, a
   190  	// new and updated announcement will be returned.
   191  	SelfNodeAnnouncement func(refresh bool) (lnwire.NodeAnnouncement, error)
   192  
   193  	// ProofMatureDelta is the number of confirmations needed before
   194  	// exchanging the channel announcement proofs.
   195  	ProofMatureDelta uint32
   196  
   197  	// TrickleDelay is the period of the trickle timer, which flushes to
   198  	// the network the pending batch of new announcements we've received
   199  	// since the last trickle tick.
   200  	TrickleDelay time.Duration
   201  
   202  	// RetransmitTicker is a ticker that ticks with a period which
   203  	// indicates that we should check if we need to re-broadcast any of our
   204  	// personal channels.
   205  	RetransmitTicker ticker.Ticker
   206  
   207  	// RebroadcastInterval is the maximum time we wait between sending out
   208  	// channel updates for our active channels and our own node
   209  	// announcement. We do this to ensure our active presence on the
   210  	// network is known, and we are not being considered a zombie node or
   211  	// having zombie channels.
   212  	RebroadcastInterval time.Duration
   213  
   214  	// WaitingProofStore is a persistent storage of partial channel proof
   215  	// announcement messages. We use it to buffer half of the material
   216  	// needed to reconstruct a full authenticated channel announcement.
   217  	// Once we receive the other half of the channel proof, we'll be able to
   218  	// properly validate it and re-broadcast it out to the network.
   219  	//
   220  	// TODO(wilmer): make interface to prevent channeldb dependency.
   221  	WaitingProofStore *channeldb.WaitingProofStore
   222  
   223  	// MessageStore is a persistent storage of gossip messages which we will
   224  	// use to determine which messages need to be resent for a given peer.
   225  	MessageStore GossipMessageStore
   226  
   227  	// AnnSigner is an instance of the MessageSigner interface which will
   228  	// be used to manually sign any outgoing channel updates. The signer
   229  	// implementation should be backed by the public key of the backing
   230  	// Lightning node.
   231  	//
   232  	// TODO(roasbeef): extract ann crafting + sign from fundingMgr into
   233  	// here?
   234  	AnnSigner lnwallet.MessageSigner
   235  
   236  	// NumActiveSyncers is the number of peers we should have active
   237  	// gossip syncers with. After reaching NumActiveSyncers, any future
   238  	// gossip syncers will be passive.
   239  	NumActiveSyncers int
   240  
   241  	// RotateTicker is a ticker responsible for notifying the SyncManager
   242  	// when it should rotate its active syncers. A single active syncer with
   243  	// a chansSynced state will be exchanged for a passive syncer in order
   244  	// to ensure we don't keep syncing with the same peers.
   245  	RotateTicker ticker.Ticker
   246  
   247  	// HistoricalSyncTicker is a ticker responsible for notifying the
   248  	// syncManager when it should attempt a historical sync with a gossip
   249  	// sync peer.
   250  	HistoricalSyncTicker ticker.Ticker
   251  
   252  	// ActiveSyncerTimeoutTicker is a ticker responsible for notifying the
   253  	// syncManager when it should attempt to start the next pending
   254  	// activeSyncer due to the current one not completing its state machine
   255  	// within the timeout.
   256  	ActiveSyncerTimeoutTicker ticker.Ticker
   257  
   258  	// MinimumBatchSize is the minimum size of a sub batch of announcement
   259  	// messages.
   260  	MinimumBatchSize int
   261  
   262  	// SubBatchDelay is the delay between sending sub batches of
   263  	// gossip messages.
   264  	SubBatchDelay time.Duration
   265  
   266  	// IgnoreHistoricalFilters will prevent syncers from replying with
   267  	// historical data when the remote peer sets a gossip_timestamp_range.
   268  	// This prevents ranges with old start times from causing us to dump the
   269  	// graph on connect.
   270  	IgnoreHistoricalFilters bool
   271  
   272  	// PinnedSyncers is a set of peers that will always transition to
   273  	// ActiveSync upon connection. These peers will never transition to
   274  	// PassiveSync.
   275  	PinnedSyncers PinnedSyncers
   276  
   277  	// MaxChannelUpdateBurst specifies the maximum number of updates for a
   278  	// specific channel and direction that we'll accept over an interval.
   279  	MaxChannelUpdateBurst int
   280  
   281  	// ChannelUpdateInterval specifies the interval we'll use to determine
   282  	// how often we should allow a new update for a specific channel and
   283  	// direction.
   284  	ChannelUpdateInterval time.Duration
   285  }
   286  
   287  // cachedNetworkMsg is a wrapper around a network message that can be used with
   288  // *lru.Cache.
   289  type cachedNetworkMsg struct {
   290  	msgs []*networkMsg
   291  }
   292  
   293  // Size returns the "size" of an entry. We return the number of items as we
   294  // just want to limit the total number of entries rather than do accurate size
   295  // accounting.
   296  func (c *cachedNetworkMsg) Size() (uint64, error) {
   297  	return uint64(len(c.msgs)), nil
   298  }
   299  
   300  // rejectCacheKey is the cache key that we'll use to track announcements we've
   301  // recently rejected.
   302  type rejectCacheKey struct {
   303  	pubkey [33]byte
   304  	chanID uint64
   305  }
   306  
   307  // newRejectCacheKey returns a new cache key for the reject cache.
   308  func newRejectCacheKey(cid uint64, pub [33]byte) rejectCacheKey {
   309  	k := rejectCacheKey{
   310  		chanID: cid,
   311  		pubkey: pub,
   312  	}
   313  
   314  	return k
   315  }
   316  
   317  // sourceToPub returns a serialized-compressed public key for use in the reject
   318  // cache.
   319  func sourceToPub(pk *secp256k1.PublicKey) [33]byte {
   320  	var pub [33]byte
   321  	copy(pub[:], pk.SerializeCompressed())
   322  	return pub
   323  }
   324  
   325  // cachedReject is an empty placeholder value stored for each reject entry.
   326  type cachedReject struct {
   327  }
   328  
   329  // Size returns the "size" of an entry. We return 1 as we just want to limit
   330  // the total size.
   331  func (c *cachedReject) Size() (uint64, error) {
   332  	return 1, nil
   333  }
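// A sketch of how a reject-cache entry is keyed and stored, assuming the LRU
// cache exposes a Put method mirroring the Get used elsewhere in this file
// (the short channel ID and peer key below are hypothetical):
//
//	key := newRejectCacheKey(scid.ToUint64(), sourceToPub(peerPub))
//	_, _ = d.recentRejects.Put(key, &cachedReject{})
//
// isRecentlyRejectedMsg later probes the same key to skip reprocessing.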
   334  
   335  // AuthenticatedGossiper is a subsystem which is responsible for receiving
   336  // announcements, validating them and applying the changes to the router,
   337  // syncing the Lightning Network with newly connected nodes, broadcasting
   338  // announcements after validation, negotiating the channel announcement proofs
   339  // exchange, and handling premature announcements. All outgoing announcements
   340  // are expected to be properly signed as dictated in BOLT#7; additionally, all
   341  // incoming messages are expected to be well formed and signed. Invalid messages
   342  // will be rejected by this struct.
   343  type AuthenticatedGossiper struct {
   344  	// Parameters which are needed to properly handle the start and stop of
   345  	// the service.
   346  	started sync.Once
   347  	stopped sync.Once
   348  
   349  	// bestHeight is the height of the block at the tip of the main chain
   350  	// as we know it. Accesses *MUST* be done with the gossiper's lock
   351  	// held.
   352  	bestHeight uint32
   353  
   354  	quit chan struct{}
   355  	wg   sync.WaitGroup
   356  
   357  	// cfg is a copy of the configuration struct that the gossiper service
   358  	// was initialized with.
   359  	cfg *Config
   360  
   361  	// blockEpochs encapsulates a stream of block epochs that are sent at
   362  	// every new block height.
   363  	blockEpochs *chainntnfs.BlockEpochEvent
   364  
   365  	// prematureChannelUpdates is a map of ChannelUpdates we have received
   366  	// that weren't associated with any channel we know about. We store
   367  	// them temporarily, such that we can reprocess them when a
   368  	// ChannelAnnouncement for the channel is received.
   369  	prematureChannelUpdates *lru.Cache
   370  
   371  	// networkMsgs is a channel that carries new network broadcast
   372  	// messages from outside the gossiper service to be processed by the
   373  	// networkHandler.
   374  	networkMsgs chan *networkMsg
   375  
   376  	// futureMsgs is a list of premature network messages that have a block
   377  	// height specified in the future. We will save them and resend them to
   378  	// the networkMsgs channel once the block height has been reached. The cached
   379  	// map format is,
   380  	//   {blockHeight: [msg1, msg2, ...], ...}
   381  	futureMsgs *lru.Cache
   382  
   383  	// chanPolicyUpdates is a channel over which requests to update the
   384  	// forwarding policy of a set of channels are sent.
   385  	chanPolicyUpdates chan *chanPolicyUpdateRequest
   386  
   387  	// selfKey is the identity public key of the backing Lightning node.
   388  	selfKey *secp256k1.PublicKey
   389  
   390  	// selfKeyLoc is the locator for the identity public key of the backing
   391  	// Lightning node.
   392  	selfKeyLoc keychain.KeyLocator
   393  
   394  	// channelMtx is used to restrict the database access to one
   395  	// goroutine per channel ID. This is done to ensure that when
   396  	// the gossiper is handling an announcement, the db state stays
   397  	// consistent from when the DB is first read until it's written.
   398  	channelMtx *multimutex.Mutex
   399  
   400  	recentRejects *lru.Cache
   401  
   402  	// syncMgr is a subsystem responsible for managing the gossip syncers
   403  	// for peers currently connected. When a new peer is connected, the
   404  	// manager will create its accompanying gossip syncer and determine
   405  	// whether it should have an activeSync or passiveSync sync type based
   406  	// on how many other gossip syncers are currently active. Any activeSync
   407  	// gossip syncers are started in a round-robin manner to ensure we're
   408  	// not syncing with multiple peers at the same time.
   409  	syncMgr *SyncManager
   410  
   411  	// reliableSender is a subsystem responsible for handling reliable
   412  	// message send requests to peers. This should only be used for channels
   413  	// that are unadvertised at the time of handling the message since if it
   414  	// is advertised, then peers should be able to get the message from the
   415  	// network.
   416  	reliableSender *reliableSender
   417  
   418  	// chanUpdateRateLimiter contains rate limiters for each direction of
   419  	// a channel update we've processed. We'll use these to determine
   420  	// whether we should accept a new update for a specific channel and
   421  	// direction.
   422  	//
   423  	// NOTE: This map must be synchronized with the main
   424  	// AuthenticatedGossiper lock.
   425  	chanUpdateRateLimiter map[uint64][2]*rate.Limiter
   426  
   427  	sync.Mutex
   428  }
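// As a hedged sketch of how chanUpdateRateLimiter entries relate to the config
// values above (this mirrors the intent of MaxChannelUpdateBurst and
// ChannelUpdateInterval, not necessarily the exact construction used later in
// this file; shortChanID is a hypothetical key):
//
//	limit := rate.Every(d.cfg.ChannelUpdateInterval)
//	d.chanUpdateRateLimiter[shortChanID] = [2]*rate.Limiter{
//		rate.NewLimiter(limit, d.cfg.MaxChannelUpdateBurst),
//		rate.NewLimiter(limit, d.cfg.MaxChannelUpdateBurst),
//	}
//
// Each direction of a channel then gets its own token bucket that refills once
// per ChannelUpdateInterval and allows bursts of up to MaxChannelUpdateBurst.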
   429  
   430  // New creates a new AuthenticatedGossiper instance, initialized with the
   431  // passed configuration parameters.
   432  func New(cfg Config, selfKeyDesc *keychain.KeyDescriptor) *AuthenticatedGossiper {
   433  	gossiper := &AuthenticatedGossiper{
   434  		selfKey:                 selfKeyDesc.PubKey,
   435  		selfKeyLoc:              selfKeyDesc.KeyLocator,
   436  		cfg:                     &cfg,
   437  		networkMsgs:             make(chan *networkMsg),
   438  		futureMsgs:              lru.NewCache(maxPrematureUpdates),
   439  		quit:                    make(chan struct{}),
   440  		chanPolicyUpdates:       make(chan *chanPolicyUpdateRequest),
   441  		prematureChannelUpdates: lru.NewCache(maxPrematureUpdates),
   442  		channelMtx:              multimutex.NewMutex(),
   443  		recentRejects:           lru.NewCache(maxRejectedUpdates),
   444  		chanUpdateRateLimiter:   make(map[uint64][2]*rate.Limiter),
   445  	}
   446  
   447  	gossiper.syncMgr = newSyncManager(&SyncManagerCfg{
   448  		ChainHash:               cfg.ChainHash,
   449  		ChanSeries:              cfg.ChanSeries,
   450  		GossiperState:           cfg.GossiperState,
   451  		RotateTicker:            cfg.RotateTicker,
   452  		HistoricalSyncTicker:    cfg.HistoricalSyncTicker,
   453  		NumActiveSyncers:        cfg.NumActiveSyncers,
   454  		IgnoreHistoricalFilters: cfg.IgnoreHistoricalFilters,
   455  		BestHeight:              gossiper.latestHeight,
   456  		PinnedSyncers:           cfg.PinnedSyncers,
   457  	})
   458  
   459  	gossiper.reliableSender = newReliableSender(&reliableSenderCfg{
   460  		NotifyWhenOnline:  cfg.NotifyWhenOnline,
   461  		NotifyWhenOffline: cfg.NotifyWhenOffline,
   462  		MessageStore:      cfg.MessageStore,
   463  		IsMsgStale:        gossiper.isMsgStale,
   464  	})
   465  
   466  	return gossiper
   467  }
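// A hedged usage sketch of constructing and running the gossiper (cfg and
// keyDesc are assumed to be fully populated by the caller; every Config field
// must be non-nil as noted above):
//
//	gossiper := New(cfg, keyDesc)
//	if err := gossiper.Start(); err != nil {
//		// handle startup failure
//	}
//	defer func() { _ = gossiper.Stop() }()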
   468  
   469  // EdgeWithInfo contains the information that is required to update an edge.
   470  type EdgeWithInfo struct {
   471  	// Info describes the channel.
   472  	Info *channeldb.ChannelEdgeInfo
   473  
   474  	// Edge describes the policy in one direction of the channel.
   475  	Edge *channeldb.ChannelEdgePolicy
   476  }
   477  
   478  // PropagateChanPolicyUpdate signals the AuthenticatedGossiper to perform the
   479  // specified edge updates. Updates are done in two stages: first, the
   480  // AuthenticatedGossiper ensures the update has been committed by dependent
   481  // sub-systems, then it signs and broadcasts new updates to the network. The
   482  // returned error reports whether the updates could be committed, signed, and
   483  // queued for broadcast.
   484  func (d *AuthenticatedGossiper) PropagateChanPolicyUpdate(
   485  	edgesToUpdate []EdgeWithInfo) error {
   486  
   487  	errChan := make(chan error, 1)
   488  	policyUpdate := &chanPolicyUpdateRequest{
   489  		edgesToUpdate: edgesToUpdate,
   490  		errChan:       errChan,
   491  	}
   492  
   493  	select {
   494  	case d.chanPolicyUpdates <- policyUpdate:
   495  		err := <-errChan
   496  		return err
   497  	case <-d.quit:
   498  		return fmt.Errorf("AuthenticatedGossiper shutting down")
   499  	}
   500  }
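// A minimal caller-side sketch (edges is a hypothetical, pre-built slice of
// EdgeWithInfo values). The call blocks until the network handler has picked
// up the request and reports back whether the updates could be committed and
// signed:
//
//	if err := d.PropagateChanPolicyUpdate(edges); err != nil {
//		// policy update failed; the new updates were not broadcast
//	}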
   501  
   502  // Start spawns the network message handler goroutine and registers for new
   503  // block notifications in order to properly handle premature announcements.
   504  func (d *AuthenticatedGossiper) Start() error {
   505  	var err error
   506  	d.started.Do(func() {
   507  		err = d.start()
   508  	})
   509  	return err
   510  }
   511  
   512  func (d *AuthenticatedGossiper) start() error {
   513  	log.Info("Authenticated Gossiper is starting")
   514  
   515  	// First we register for new notifications of newly discovered blocks.
   516  	// We do this immediately so we'll later be able to consume any/all
   517  	// blocks which were discovered.
   518  	blockEpochs, err := d.cfg.Notifier.RegisterBlockEpochNtfn(nil)
   519  	if err != nil {
   520  		return err
   521  	}
   522  	d.blockEpochs = blockEpochs
   523  
   524  	height, err := d.cfg.Router.CurrentBlockHeight()
   525  	if err != nil {
   526  		return err
   527  	}
   528  	d.bestHeight = height
   529  
   530  	// Start the reliable sender. In case we had any pending messages ready
   531  	// to be sent when the gossiper was last shut down, we must continue on
   532  	// our quest to deliver them to their respective peers.
   533  	if err := d.reliableSender.Start(); err != nil {
   534  		return err
   535  	}
   536  
   537  	d.syncMgr.Start()
   538  
   539  	// Start receiving blocks in its dedicated goroutine.
   540  	d.wg.Add(2)
   541  	go d.syncBlockHeight()
   542  	go d.networkHandler()
   543  
   544  	return nil
   545  }
   546  
   547  // syncBlockHeight syncs the best block height for the gossiper by reading
   548  // blockEpochs.
   549  //
   550  // NOTE: must be run as a goroutine.
   551  func (d *AuthenticatedGossiper) syncBlockHeight() {
   552  	defer d.wg.Done()
   553  
   554  	for {
   555  		select {
   556  		// A new block has arrived, so we can re-process the previously
   557  		// premature announcements.
   558  		case newBlock, ok := <-d.blockEpochs.Epochs:
   559  			// If the channel has been closed, then this indicates
   560  			// the daemon is shutting down, so we exit ourselves.
   561  			if !ok {
   562  				return
   563  			}
   564  
   565  			// Once a new block arrives, we update our running
   566  			// track of the height of the chain tip.
   567  			d.Lock()
   568  			blockHeight := uint32(newBlock.Height)
   569  			d.bestHeight = blockHeight
   570  			d.Unlock()
   571  
   572  			log.Debugf("New block: height=%d, hash=%s", blockHeight,
   573  				newBlock.Hash)
   574  
   575  			// Resend future messages, if any.
   576  			d.resendFutureMessages(blockHeight)
   577  
   578  		case <-d.quit:
   579  			return
   580  		}
   581  	}
   582  }
   583  
   584  // resendFutureMessages takes a block height, resends all the future messages
   585  // found at that height and deletes those messages found in the gossiper's
   586  // futureMsgs.
   587  func (d *AuthenticatedGossiper) resendFutureMessages(height uint32) {
   588  	result, err := d.futureMsgs.Get(height)
   589  
   590  	// Return early if no messages found.
   591  	if err == cache.ErrElementNotFound {
   592  		return
   593  	}
   594  
   595  	// If the error is non-nil, we will log it and exit.
   596  	if err != nil {
   597  		log.Errorf("Reading future messages got error: %v", err)
   598  		return
   599  	}
   600  
   601  	msgs := result.(*cachedNetworkMsg).msgs
   602  
   603  	log.Debugf("Resending %d network messages at height %d",
   604  		len(msgs), height)
   605  
   606  	for _, msg := range msgs {
   607  		select {
   608  		case d.networkMsgs <- msg:
   609  		case <-d.quit:
   610  			msg.err <- ErrGossiperShuttingDown
   611  		}
   612  	}
   613  }
   614  
   615  // Stop signals any active goroutines for a graceful closure.
   616  func (d *AuthenticatedGossiper) Stop() error {
   617  	d.stopped.Do(func() {
   618  		log.Info("Authenticated gossiper shutting down")
   619  		d.stop()
   620  	})
   621  	return nil
   622  }
   623  
   624  func (d *AuthenticatedGossiper) stop() {
   625  	log.Info("Authenticated Gossiper is stopping")
   626  
   627  	d.blockEpochs.Cancel()
   628  
   629  	d.syncMgr.Stop()
   630  
   631  	close(d.quit)
   632  	d.wg.Wait()
   633  
   634  	// We'll stop our reliable sender after all of the gossiper's goroutines
   635  	// have exited to ensure nothing can cause it to continue executing.
   636  	d.reliableSender.Stop()
   637  }
   638  
   639  // TODO(roasbeef): need method to get current gossip timestamp?
   640  //  * using mtx, check time rotate forward is needed?
   641  
   642  // ProcessRemoteAnnouncement sends a new remote announcement message along with
   643  // the peer that sent the routing message. The announcement will be processed
   644  // and then added to a queue for batched, trickled announcement to all
   645  // connected peers. Remote channel announcements should contain the
   646  // announcement proof and be fully validated.
   647  func (d *AuthenticatedGossiper) ProcessRemoteAnnouncement(msg lnwire.Message,
   648  	peer lnpeer.Peer) chan error {
   649  
   650  	errChan := make(chan error, 1)
   651  
   652  	// For messages in the known set of channel series queries, we'll
   653  	// dispatch the message directly to the GossipSyncer, and skip the main
   654  	// processing loop.
   655  	switch m := msg.(type) {
   656  	case *lnwire.QueryShortChanIDs,
   657  		*lnwire.QueryChannelRange,
   658  		*lnwire.ReplyChannelRange,
   659  		*lnwire.ReplyShortChanIDsEnd:
   660  
   661  		syncer, ok := d.syncMgr.GossipSyncer(peer.PubKey())
   662  		if !ok {
   663  			log.Warnf("Gossip syncer for peer=%x not found",
   664  				peer.PubKey())
   665  
   666  			errChan <- ErrGossipSyncerNotFound
   667  			return errChan
   668  		}
   669  
   670  		// If we've found the message target, then we'll dispatch the
   671  		// message directly to it.
   672  		syncer.ProcessQueryMsg(m, peer.QuitSignal())
   673  
   674  		errChan <- nil
   675  		return errChan
   676  
   677  	// If a peer is updating its current update horizon, then we'll dispatch
   678  	// that directly to the proper GossipSyncer.
   679  	case *lnwire.GossipTimestampRange:
   680  		syncer, ok := d.syncMgr.GossipSyncer(peer.PubKey())
   681  		if !ok {
   682  			log.Warnf("Gossip syncer for peer=%x not found",
   683  				peer.PubKey())
   684  
   685  			errChan <- ErrGossipSyncerNotFound
   686  			return errChan
   687  		}
   688  
   689  		// If we've found the message target, then we'll dispatch the
   690  		// message directly to it.
   691  		if err := syncer.ApplyGossipFilter(m); err != nil {
   692  			log.Warnf("Unable to apply gossip filter for peer=%x: "+
   693  				"%v", peer.PubKey(), err)
   694  
   695  			errChan <- err
   696  			return errChan
   697  		}
   698  
   699  		errChan <- nil
   700  		return errChan
   701  
   702  	// To avoid inserting edges in the graph for our own channels that we
   703  	// have already closed, we ignore such channel announcements coming
   704  	// from the remote.
   705  	case *lnwire.ChannelAnnouncement:
   706  		ownKey := d.selfKey.SerializeCompressed()
   707  		ownErr := fmt.Errorf("ignoring remote ChannelAnnouncement "+
   708  			"for own channel %s received from %v", m.ShortChannelID,
   709  			peer)
   710  
   711  		if bytes.Equal(m.NodeID1[:], ownKey) ||
   712  			bytes.Equal(m.NodeID2[:], ownKey) {
   713  
   714  			log.Debug(ownErr)
   715  			errChan <- ownErr
   716  			return errChan
   717  		}
   718  	}
   719  
   720  	nMsg := &networkMsg{
   721  		msg:      msg,
   722  		isRemote: true,
   723  		peer:     peer,
   724  		source:   peer.IdentityKey(),
   725  		err:      errChan,
   726  	}
   727  
   728  	select {
   729  	case d.networkMsgs <- nMsg:
   730  
   731  	// If the peer that sent us this message is quitting, then we don't need
   732  	// to send back an error and can return immediately.
   733  	case <-peer.QuitSignal():
   734  		return nil
   735  	case <-d.quit:
   736  		nMsg.err <- ErrGossiperShuttingDown
   737  	}
   738  
   739  	return nMsg.err
   740  }
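// Callers typically wait on the returned channel for the outcome of the
// processing. A sketch (msg and peer are assumed to come from the peer's read
// loop; note the channel is nil when the sending peer is already quitting):
//
//	errChan := d.ProcessRemoteAnnouncement(msg, peer)
//	if errChan != nil {
//		if err := <-errChan; err != nil {
//			// announcement was rejected or could not be processed
//		}
//	}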
   741  
   742  // ProcessLocalAnnouncement submits a new local announcement message for
   743  // processing. The announcement will be processed and then added to a queue
   744  // for batched, trickled announcement to all connected peers. Local channel
   745  // announcements don't contain the announcement proof and
   746  // will not be fully validated. Once the channel proofs are received, the
   747  // entire channel announcement and update messages will be re-constructed and
   748  // broadcast to the rest of the network.
   749  func (d *AuthenticatedGossiper) ProcessLocalAnnouncement(msg lnwire.Message,
   750  	optionalFields ...OptionalMsgField) chan error {
   751  
   752  	optionalMsgFields := &optionalMsgFields{}
   753  	optionalMsgFields.apply(optionalFields...)
   754  
   755  	nMsg := &networkMsg{
   756  		msg:               msg,
   757  		optionalMsgFields: optionalMsgFields,
   758  		isRemote:          false,
   759  		source:            d.selfKey,
   760  		err:               make(chan error, 1),
   761  	}
   762  
   763  	select {
   764  	case d.networkMsgs <- nMsg:
   765  	case <-d.quit:
   766  		nMsg.err <- ErrGossiperShuttingDown
   767  	}
   768  
   769  	return nMsg.err
   770  }
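// A sketch of announcing a locally originated message with optional metadata
// attached (chanAnn and capacity are hypothetical values supplied by, e.g.,
// the funding manager):
//
//	errChan := d.ProcessLocalAnnouncement(
//		chanAnn, ChannelCapacity(capacity),
//	)
//	if err := <-errChan; err != nil {
//		// handle processing error
//	}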
   771  
   772  // channelUpdateID is a unique identifier for ChannelUpdate messages, as
   773  // channel updates can be identified by the (ShortChannelID, ChannelFlags)
   774  // tuple.
   775  type channelUpdateID struct {
   776  	// channelID represents the set of data which is needed to
   777  	// retrieve all necessary data to validate the channel existence.
   778  	channelID lnwire.ShortChannelID
   779  
   780  	// Flags least-significant bit must be set to 0 if the creating node
   781  	// corresponds to the first node in the previously sent channel
   782  	// announcement and 1 otherwise.
   783  	flags lnwire.ChanUpdateChanFlags
   784  }
   785  
   786  // msgWithSenders is a wrapper struct around a message, and the set of peers
   787  // that originally sent us this message. Using this struct, we can ensure that
   788  // we don't re-send a message to the peer that sent it to us in the first
   789  // place.
   790  type msgWithSenders struct {
   791  	// msg is the wire message itself.
   792  	msg lnwire.Message
   793  
   794  	// senders is the set of peers that sent us this message.
   795  	senders map[route.Vertex]struct{}
   796  }
   797  
   798  // mergeSyncerMap is used to merge the set of senders of a particular message
   799  // with peers that we have an active GossipSyncer with. We do this to ensure
   800  // that we don't broadcast messages to any peers that we have active gossip
   801  // syncers for.
   802  func (m *msgWithSenders) mergeSyncerMap(syncers map[route.Vertex]*GossipSyncer) {
   803  	for peerPub := range syncers {
   804  		m.senders[peerPub] = struct{}{}
   805  	}
   806  }
   807  
   808  // deDupedAnnouncements de-duplicates announcements that have been added to the
   809  // batch. Internally, announcements are stored in three maps
   810  // (one each for channel announcements, channel updates, and node
   811  // announcements). These maps keep track of unique announcements and ensure no
   812  // announcements are duplicated. We keep the three message types separate, such
   813  // that we can send channel announcements first, then channel updates, and
   814  // finally node announcements when it's time to broadcast them.
   815  type deDupedAnnouncements struct {
   816  	// channelAnnouncements are identified by the short channel id field.
   817  	channelAnnouncements map[lnwire.ShortChannelID]msgWithSenders
   818  
   819  	// channelUpdates are identified by the channel update id field.
   820  	channelUpdates map[channelUpdateID]msgWithSenders
   821  
   822  	// nodeAnnouncements are identified by the Vertex field.
   823  	nodeAnnouncements map[route.Vertex]msgWithSenders
   824  
   825  	sync.Mutex
   826  }
   827  
   828  // Reset operates on deDupedAnnouncements to reset the storage of
   829  // announcements.
   830  func (d *deDupedAnnouncements) Reset() {
   831  	d.Lock()
   832  	defer d.Unlock()
   833  
   834  	d.reset()
   835  }
   836  
   837  // reset is the private version of the Reset method. We have this so we can
   838  // call this method within methods that are already holding the lock.
   839  func (d *deDupedAnnouncements) reset() {
   840  	// Storage of each type of announcement (channel announcements, channel
   841  	// updates, node announcements) is set to an empty map where the
   842  	// appropriate key points to the corresponding lnwire.Message.
   843  	d.channelAnnouncements = make(map[lnwire.ShortChannelID]msgWithSenders)
   844  	d.channelUpdates = make(map[channelUpdateID]msgWithSenders)
   845  	d.nodeAnnouncements = make(map[route.Vertex]msgWithSenders)
   846  }
   847  
   848  // addMsg adds a new message to the current batch. If the message is already
   849  // present in the current batch, then this new instance replaces the latter,
   850  // and the set of senders is updated to reflect which node sent us this
   851  // message.
   852  func (d *deDupedAnnouncements) addMsg(message networkMsg) {
   853  	log.Tracef("Adding network message: %v to batch", message.msg.MsgType())
   854  
   855  	// Depending on the message type (channel announcement, channel update,
   856  	// or node announcement), the message is added to the corresponding map
   857  	// in deDupedAnnouncements. Because each identifying key can have at
   858  	// most one value, the announcements are de-duplicated, with newer ones
   859  	// replacing older ones.
   860  	switch msg := message.msg.(type) {
   861  
   862  	// Channel announcements are identified by the short channel id field.
   863  	case *lnwire.ChannelAnnouncement:
   864  		deDupKey := msg.ShortChannelID
   865  		sender := route.NewVertex(message.source)
   866  
   867  		mws, ok := d.channelAnnouncements[deDupKey]
   868  		if !ok {
   869  			mws = msgWithSenders{
   870  				msg:     msg,
   871  				senders: make(map[route.Vertex]struct{}),
   872  			}
   873  			mws.senders[sender] = struct{}{}
   874  
   875  			d.channelAnnouncements[deDupKey] = mws
   876  
   877  			return
   878  		}
   879  
   880  		mws.msg = msg
   881  		mws.senders[sender] = struct{}{}
   882  		d.channelAnnouncements[deDupKey] = mws
   883  
   884  	// Channel updates are identified by the (short channel id,
   885  	// channelflags) tuple.
   886  	case *lnwire.ChannelUpdate:
   887  		sender := route.NewVertex(message.source)
   888  		deDupKey := channelUpdateID{
   889  			msg.ShortChannelID,
   890  			msg.ChannelFlags,
   891  		}
   892  
   893  		oldTimestamp := uint32(0)
   894  		mws, ok := d.channelUpdates[deDupKey]
   895  		if ok {
   896  			// If we already have seen this message, record its
   897  			// timestamp.
   898  			oldTimestamp = mws.msg.(*lnwire.ChannelUpdate).Timestamp
   899  		}
   900  
   901  		// If we already had this message with a strictly newer
   902  		// timestamp, then we'll just discard the message we got.
   903  		if oldTimestamp > msg.Timestamp {
   904  			log.Debugf("Ignored outdated network message: "+
   905  				"peer=%v, source=%x, msg=%s", message.peer,
   906  				message.source.SerializeCompressed(),
   907  				msg.MsgType())
   908  			return
   909  		}
   910  
   911  		// If the message we just got is newer than what we previously
   912  		// have seen, or this is the first time we see it, then we'll
   913  		// add it to our map of announcements.
   914  		if oldTimestamp < msg.Timestamp {
   915  			mws = msgWithSenders{
   916  				msg:     msg,
   917  				senders: make(map[route.Vertex]struct{}),
   918  			}
   919  
   920  			// We'll mark the sender of the message in the
   921  			// senders map.
   922  			mws.senders[sender] = struct{}{}
   923  
   924  			d.channelUpdates[deDupKey] = mws
   925  
   926  			return
   927  		}
   928  
   929  		// Lastly, if we had seen this exact message from before, with
   930  		// the same timestamp, we'll add the sender to the map of
   931  		// senders, such that we can skip sending this message back in
   932  		// the next batch.
   933  		mws.msg = msg
   934  		mws.senders[sender] = struct{}{}
   935  		d.channelUpdates[deDupKey] = mws
   936  
   937  	// Node announcements are identified by the Vertex field.  Use the
   938  	// NodeID to create the corresponding Vertex.
   939  	case *lnwire.NodeAnnouncement:
   940  		sender := route.NewVertex(message.source)
   941  		deDupKey := route.Vertex(msg.NodeID)
   942  
   943  		// We do the same for node announcements as we did for channel
   944  		// updates, as they also carry a timestamp.
   945  		oldTimestamp := uint32(0)
   946  		mws, ok := d.nodeAnnouncements[deDupKey]
   947  		if ok {
   948  			oldTimestamp = mws.msg.(*lnwire.NodeAnnouncement).Timestamp
   949  		}
   950  
   951  		// Discard the message if it's old.
   952  		if oldTimestamp > msg.Timestamp {
   953  			return
   954  		}
   955  
   956  		// Replace if it's newer.
   957  		if oldTimestamp < msg.Timestamp {
   958  			mws = msgWithSenders{
   959  				msg:     msg,
   960  				senders: make(map[route.Vertex]struct{}),
   961  			}
   962  
   963  			mws.senders[sender] = struct{}{}
   964  
   965  			d.nodeAnnouncements[deDupKey] = mws
   966  
   967  			return
   968  		}
   969  
   970  		// Add to senders map if it's the same as we had.
   971  		mws.msg = msg
   972  		mws.senders[sender] = struct{}{}
   973  		d.nodeAnnouncements[deDupKey] = mws
   974  	}
   975  }
   976  
   977  // AddMsgs is a helper method to add multiple messages to the announcement
   978  // batch.
   979  func (d *deDupedAnnouncements) AddMsgs(msgs ...networkMsg) {
   980  	d.Lock()
   981  	defer d.Unlock()
   982  
   983  	for _, msg := range msgs {
   984  		d.addMsg(msg)
   985  	}
   986  }
   987  
   988  // Emit returns the set of de-duplicated announcements to be sent out during
   989  // the next announcement epoch, in the order of channel announcements, channel
   990  // updates, and node announcements. Each message emitted contains the set of
   991  // peers that sent us the message. This way, we can ensure that we don't waste
   992  // bandwidth by re-sending a message to the peer that sent it to us in the
   993  // first place. Additionally, the set of stored messages is reset.
   994  func (d *deDupedAnnouncements) Emit() []msgWithSenders {
   995  	d.Lock()
   996  	defer d.Unlock()
   997  
   998  	// Get the total number of announcements.
   999  	numAnnouncements := len(d.channelAnnouncements) + len(d.channelUpdates) +
  1000  		len(d.nodeAnnouncements)
  1001  
  1002  	// Create an empty slice of msgWithSenders with a capacity equal to
  1003  	// the total number of announcements.
  1004  	msgs := make([]msgWithSenders, 0, numAnnouncements)
  1005  
  1006  	// Add the channel announcements to the array first.
  1007  	for _, message := range d.channelAnnouncements {
  1008  		msgs = append(msgs, message)
  1009  	}
  1010  
  1011  	// Then add the channel updates.
  1012  	for _, message := range d.channelUpdates {
  1013  		msgs = append(msgs, message)
  1014  	}
  1015  
  1016  	// Finally add the node announcements.
  1017  	for _, message := range d.nodeAnnouncements {
  1018  		msgs = append(msgs, message)
  1019  	}
  1020  
  1021  	d.reset()
  1022  
  1023  	// Return the slice of de-duplicated messages.
  1024  	return msgs
  1025  }
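// A short sketch of the intended batching flow (msg1 and msg2 are hypothetical
// networkMsg values). Duplicate announcements collapse onto a single entry,
// and Emit drains the batch in channel-announcement, channel-update,
// node-announcement order:
//
//	batch := deDupedAnnouncements{}
//	batch.Reset()
//	batch.AddMsgs(msg1, msg2)
//	for _, m := range batch.Emit() {
//		_ = m.msg // broadcast m.msg, skipping the peers in m.senders
//	}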
  1026  
  1027  // calculateSubBatchSize is a helper function that calculates the sub batch
  1028  // size to break the batchSize into.
  1029  func calculateSubBatchSize(totalDelay, subBatchDelay time.Duration,
  1030  	minimumBatchSize, batchSize int) int {
  1031  	if subBatchDelay > totalDelay {
  1032  		return batchSize
  1033  	}
  1034  
  1035  	subBatchSize := (batchSize*int(subBatchDelay) + int(totalDelay) - 1) /
  1036  		int(totalDelay)
  1037  
  1038  	if subBatchSize < minimumBatchSize {
  1039  		return minimumBatchSize
  1040  	}
  1041  
  1042  	return subBatchSize
  1043  }
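// For example, with a trickle delay of 90s, a sub batch delay of 5s, a minimum
// batch size of 10 and 300 queued announcements, the ceiling division above
// yields ceil(300*5/90) = 17 messages per sub batch; with only 60 queued
// announcements the raw result of 4 is clamped up to the minimum of 10. The
// durations enter the formula via their integer nanosecond values, so only the
// subBatchDelay/totalDelay ratio matters.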
  1044  
  1045  // splitAnnouncementBatches takes an existing list of announcements and
  1046  // decomposes it into sub batches controlled by the `subBatchSize`.
  1047  func splitAnnouncementBatches(subBatchSize int,
  1048  	announcementBatch []msgWithSenders) [][]msgWithSenders {
  1049  	var splitAnnouncementBatch [][]msgWithSenders
  1050  
  1051  	for subBatchSize < len(announcementBatch) {
  1052  		// For slicing with minimal allocation
  1053  		// https://github.com/golang/go/wiki/SliceTricks
  1054  		announcementBatch, splitAnnouncementBatch =
  1055  			announcementBatch[subBatchSize:],
  1056  			append(splitAnnouncementBatch,
  1057  				announcementBatch[0:subBatchSize:subBatchSize])
  1058  	}
  1059  	splitAnnouncementBatch = append(splitAnnouncementBatch, announcementBatch)
  1060  
  1061  	return splitAnnouncementBatch
  1062  }
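// For instance, splitting 7 announcements with a sub batch size of 3 yields
// sub batches of lengths 3, 3 and 1; an empty input yields a single empty sub
// batch. A hypothetical sketch:
//
//	batches := splitAnnouncementBatches(3, announcementBatch) // len(announcementBatch) == 7
//	// len(batches) == 3, with lengths 3, 3 and 1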
  1063  
  1064  // sendBatch broadcasts a list of announcements to our peers.
  1065  func (d *AuthenticatedGossiper) sendBatch(announcementBatch []msgWithSenders) {
  1066  	syncerPeers := d.syncMgr.GossipSyncers()
  1067  
  1068  	// We'll first attempt to filter out this new batch of
  1069  	// messages for all peers that have active gossip
  1070  	// syncers.
  1071  	for _, syncer := range syncerPeers {
  1072  		syncer.FilterGossipMsgs(announcementBatch...)
  1073  	}
  1074  
  1075  	for _, msgChunk := range announcementBatch {
  1076  		// With the syncers taken care of, we'll merge
  1077  		// the sender map with the set of syncers, so
  1078  		// we don't send out duplicate messages.
  1079  		msgChunk.mergeSyncerMap(syncerPeers)
  1080  
  1081  		err := d.cfg.Broadcast(
  1082  			msgChunk.senders, msgChunk.msg,
  1083  		)
  1084  		if err != nil {
  1085  			log.Errorf("Unable to send batch "+
  1086  				"announcements: %v", err)
  1087  			continue
  1088  		}
  1089  	}
  1090  }
  1091  
  1092  // networkHandler is the primary goroutine that drives this service. The roles
  1093  // of this goroutine include answering queries related to the state of the
  1094  // network, syncing up newly connected peers, and also periodically
  1095  // broadcasting our latest topology state to all connected peers.
  1096  //
  1097  // NOTE: This MUST be run as a goroutine.
  1098  func (d *AuthenticatedGossiper) networkHandler() {
  1099  	defer d.wg.Done()
  1100  
  1101  	// Initialize an empty deDupedAnnouncements to store the announcement batch.
  1102  	announcements := deDupedAnnouncements{}
  1103  	announcements.Reset()
  1104  
  1105  	d.cfg.RetransmitTicker.Resume()
  1106  	defer d.cfg.RetransmitTicker.Stop()
  1107  
  1108  	trickleTimer := time.NewTicker(d.cfg.TrickleDelay)
  1109  	defer trickleTimer.Stop()
  1110  
  1111  	// To start, we'll first check to see if there are any stale channel or
  1112  	// node announcements that we need to re-transmit.
  1113  	if err := d.retransmitStaleAnns(time.Now()); err != nil {
  1114  		log.Errorf("Unable to rebroadcast stale announcements: %v", err)
  1115  	}
  1116  
  1117  	// We'll use this validation barrier to ensure that we process jobs
  1118  	// in their dependency order during parallel validation.
  1119  	validationBarrier := routing.NewValidationBarrier(1000, d.quit)
  1120  
  1121  	for {
  1122  		select {
  1123  		// A new policy update has arrived. We'll commit it to the
  1124  		// sub-systems below us, then craft, sign, and broadcast a new
  1125  		// ChannelUpdate for the set of affected clients.
  1126  		case policyUpdate := <-d.chanPolicyUpdates:
  1127  			log.Tracef("Received %d channel policy update requests",
  1128  				len(policyUpdate.edgesToUpdate))
  1129  
  1130  			// First, we'll now create new fully signed updates for
  1131  			// the affected channels and also update the underlying
  1132  			// graph with the new state.
  1133  			newChanUpdates, err := d.processChanPolicyUpdate(
  1134  				policyUpdate.edgesToUpdate,
  1135  			)
  1136  			policyUpdate.errChan <- err
  1137  			if err != nil {
  1138  				log.Errorf("Unable to craft policy updates: %v",
  1139  					err)
  1140  				continue
  1141  			}
  1142  
  1143  			// Finally, with the updates committed, we'll now add
  1144  			// them to the announcement batch to be flushed at the
  1145  			// start of the next epoch.
  1146  			announcements.AddMsgs(newChanUpdates...)
  1147  
  1148  		case announcement := <-d.networkMsgs:
  1149  			log.Tracef("Received network message: "+
  1150  				"peer=%v, source=%x, msg=%s, is_remote=%v",
  1151  				announcement.peer,
  1152  				announcement.source.SerializeCompressed(),
  1153  				announcement.msg.MsgType(),
  1154  				announcement.isRemote)
  1155  
  1156  			// We should only broadcast this message forward if it
  1157  			// originated from us or it wasn't received as part of
  1158  			// our initial historical sync.
  1159  			shouldBroadcast := !announcement.isRemote ||
  1160  				d.syncMgr.IsGraphSynced()
  1161  
  1162  			switch announcement.msg.(type) {
  1163  			// Channel announcement signatures are amongst the only
  1164  			// messages that we'll process serially.
  1165  			case *lnwire.AnnounceSignatures:
  1166  				emittedAnnouncements, _ := d.processNetworkAnnouncement(
  1167  					announcement,
  1168  				)
  1169  				log.Debugf("Processed network message %s, "+
  1170  					"returned len(announcements)=%v",
  1171  					announcement.msg.MsgType(),
  1172  					len(emittedAnnouncements))
  1173  
  1174  				if emittedAnnouncements != nil {
  1175  					announcements.AddMsgs(
  1176  						emittedAnnouncements...,
  1177  					)
  1178  				}
  1179  				continue
  1180  			}
  1181  
  1182  			// If this message was recently rejected, then we won't
  1183  			// attempt to re-process it.
  1184  			if announcement.isRemote && d.isRecentlyRejectedMsg(
  1185  				announcement.msg,
  1186  				sourceToPub(announcement.source),
  1187  			) {
  1188  				announcement.err <- fmt.Errorf("recently " +
  1189  					"rejected")
  1190  				continue
  1191  			}
  1192  
  1193  			// We'll set up any dependents, and wait until a free
  1194  			// slot for this job opens up; this allows us to not
  1195  			// have thousands of goroutines active.
  1196  			validationBarrier.InitJobDependencies(announcement.msg)
  1197  
  1198  			d.wg.Add(1)
  1199  			go func() {
  1200  				defer d.wg.Done()
  1201  				defer validationBarrier.CompleteJob()
  1202  
  1203  				// If this message has an existing dependency,
  1204  				// then we'll wait until that has been fully
  1205  				// validated before we proceed.
  1206  				err := validationBarrier.WaitForDependants(
  1207  					announcement.msg,
  1208  				)
  1209  				if err != nil {
  1210  					if !routing.IsError(
  1211  						err,
  1212  						routing.ErrVBarrierShuttingDown,
  1213  						routing.ErrParentValidationFailed,
  1214  					) {
  1215  						log.Warnf("unexpected error "+
  1216  							"during validation "+
  1217  							"barrier shutdown: %v",
  1218  							err)
  1219  					}
  1220  					announcement.err <- err
  1221  					return
  1222  				}
  1223  
  1224  				// Process the network announcement to
  1225  				// determine if this is either a new
  1226  				// announcement from our PoV or an update to a
  1227  				// prior vertex/edge we previously accepted.
  1228  				emittedAnnouncements, allowDependents := d.processNetworkAnnouncement(
  1229  					announcement,
  1230  				)
  1231  
  1232  				log.Tracef("Processed network message %s, "+
  1233  					"returned len(announcements)=%v, "+
  1234  					"allowDependents=%v",
  1235  					announcement.msg.MsgType(),
  1236  					len(emittedAnnouncements),
  1237  					allowDependents)
  1238  
  1239  				// If this message had any dependencies, then
  1240  				// we can now signal them to continue.
  1241  				validationBarrier.SignalDependants(
  1242  					announcement.msg, allowDependents,
  1243  				)
  1244  
  1245  				// If the announcement was accepted, then add
  1246  				// the emitted announcements to our announce
  1247  				// batch to be broadcast once the trickle timer
  1248  				// ticks again.
  1249  				if emittedAnnouncements != nil && shouldBroadcast {
  1250  					// TODO(roasbeef): exclude peer that
  1251  					// sent.
  1252  					announcements.AddMsgs(
  1253  						emittedAnnouncements...,
  1254  					)
  1255  				} else if emittedAnnouncements != nil {
  1256  					log.Trace("Skipping broadcast of " +
  1257  						"announcements received " +
  1258  						"during initial graph sync")
  1259  				}
  1260  
  1261  			}()
  1262  
  1263  		// The trickle timer has ticked, which indicates we should
  1264  		// flush to the network the pending batch of new announcements
  1265  		// we've received since the last trickle tick.
  1266  		case <-trickleTimer.C:
  1267  			// Emit the current batch of announcements from
  1268  			// deDupedAnnouncements.
  1269  			announcementBatch := announcements.Emit()
  1270  
  1271  			// If the current announcement batch is empty, then we
  1272  			// have no further work here.
  1273  			if len(announcementBatch) == 0 {
  1274  				continue
  1275  			}
  1276  
  1277  			// Next, if we have new things to announce then
  1278  			// broadcast them to all our immediately connected
  1279  			// peers.
  1280  			subBatchSize := calculateSubBatchSize(
  1281  				d.cfg.TrickleDelay, d.cfg.SubBatchDelay, d.cfg.MinimumBatchSize,
  1282  				len(announcementBatch),
  1283  			)
  1284  
  1285  			splitAnnouncementBatch := splitAnnouncementBatches(
  1286  				subBatchSize, announcementBatch,
  1287  			)
  1288  
  1289  			d.wg.Add(1)
  1290  			go func() {
  1291  				defer d.wg.Done()
  1292  				log.Infof("Broadcasting %v new announcements in %d sub batches",
  1293  					len(announcementBatch), len(splitAnnouncementBatch))
  1294  
  1295  				for _, announcementBatch := range splitAnnouncementBatch {
  1296  					d.sendBatch(announcementBatch)
  1297  					select {
  1298  					case <-time.After(d.cfg.SubBatchDelay):
  1299  					case <-d.quit:
  1300  						return
  1301  					}
  1302  				}
  1303  			}()
  1304  
  1305  		// The retransmission timer has ticked which indicates that we
  1306  		// should check if we need to prune or re-broadcast any of our
  1307  		// personal channels or node announcement. This addresses the
  1308  		// case of "zombie" channels and channel advertisements that
  1309  		// have been dropped, or not properly propagated through the
  1310  		// network.
  1311  		case tick := <-d.cfg.RetransmitTicker.Ticks():
  1312  			if err := d.retransmitStaleAnns(tick); err != nil {
  1313  				log.Errorf("unable to rebroadcast stale "+
  1314  					"announcements: %v", err)
  1315  			}
  1316  
  1317  		// The gossiper has been signalled to exit, so we exit our
  1318  		// main loop so the wait group can be decremented.
  1319  		case <-d.quit:
  1320  			return
  1321  		}
  1322  	}
  1323  }
  1324  
  1325  // TODO(roasbeef): d/c peers that send updates not on our chain
  1326  
  1327  // InitSyncState is called by outside sub-systems when a connection is
  1328  // established to a new peer that understands how to perform channel range
  1329  // queries. We'll allocate a new gossip syncer for it, and start any goroutines
  1330  // needed to handle new queries.
  1331  func (d *AuthenticatedGossiper) InitSyncState(syncPeer lnpeer.Peer) {
  1332  	d.syncMgr.InitSyncState(syncPeer)
  1333  }
  1334  
  1335  // PruneSyncState is called by outside sub-systems once a peer that we were
  1336  // previously connected to has been disconnected. In this case we can stop the
  1337  // existing GossipSyncer assigned to the peer and free up resources.
  1338  func (d *AuthenticatedGossiper) PruneSyncState(peer route.Vertex) {
  1339  	d.syncMgr.PruneSyncState(peer)
  1340  }
  1341  
  1342  // isRecentlyRejectedMsg returns true if we recently rejected a message, and
  1343  // false otherwise. This avoids expensive reprocessing of the message.
  1344  func (d *AuthenticatedGossiper) isRecentlyRejectedMsg(msg lnwire.Message,
  1345  	peerPub [33]byte) bool {
  1346  
  1347  	var scid uint64
  1348  	switch m := msg.(type) {
  1349  	case *lnwire.ChannelUpdate:
  1350  		scid = m.ShortChannelID.ToUint64()
  1351  
  1352  	case *lnwire.ChannelAnnouncement:
  1353  		scid = m.ShortChannelID.ToUint64()
  1354  
  1355  	default:
  1356  		return false
  1357  	}
  1358  
  1359  	_, err := d.recentRejects.Get(newRejectCacheKey(scid, peerPub))
  1360  	return err != cache.ErrElementNotFound
  1361  }
  1362  
  1363  // retransmitStaleAnns examines all outgoing channels that the source node is
  1364  // known to maintain to check if any of them are "stale". A channel is
  1365  // stale iff the last timestamp of its rebroadcast is older than the
  1366  // RebroadcastInterval. We also check if a refreshed node announcement should
  1367  // be resent.
  1368  func (d *AuthenticatedGossiper) retransmitStaleAnns(now time.Time) error {
  1369  	// Iterate over all of our channels and check if any of them fall
  1370  	// within the prune interval or re-broadcast interval.
  1371  	type updateTuple struct {
  1372  		info *channeldb.ChannelEdgeInfo
  1373  		edge *channeldb.ChannelEdgePolicy
  1374  	}
  1375  
  1376  	var (
  1377  		havePublicChannels bool
  1378  		edgesToUpdate      []updateTuple
  1379  	)
  1380  	err := d.cfg.Router.ForAllOutgoingChannels(func(
  1381  		_ kvdb.RTx,
  1382  		info *channeldb.ChannelEdgeInfo,
  1383  		edge *channeldb.ChannelEdgePolicy) error {
  1384  
  1385  		// If there's no auth proof attached to this edge, it means
  1386  		// that it is a private channel not meant to be announced to
  1387  		// the greater network, so avoid sending channel updates for
  1388  		// this channel to not leak its existence.
  1390  		if info.AuthProof == nil {
  1391  			log.Debugf("Skipping retransmission of channel "+
  1392  				"without AuthProof: %v", info.ChannelID)
  1393  			return nil
  1394  		}
  1395  
  1396  		// We make a note that we have at least one public channel. We
  1397  		// use this to determine whether we should send a node
  1398  		// announcement below.
  1399  		havePublicChannels = true
  1400  
  1401  		// If this edge has a ChannelUpdate that was created before the
  1402  		// introduction of the MaxHTLC field, then we'll update this
  1403  		// edge to propagate this information in the network.
  1404  		if !edge.MessageFlags.HasMaxHtlc() {
  1405  			// We'll make sure we support the new max_htlc field if
  1406  			// not already present.
  1407  			edge.MessageFlags |= lnwire.ChanUpdateOptionMaxHtlc
  1408  			edge.MaxHTLC = lnwire.NewMAtomsFromAtoms(info.Capacity)
  1409  
  1410  			edgesToUpdate = append(edgesToUpdate, updateTuple{
  1411  				info: info,
  1412  				edge: edge,
  1413  			})
  1414  			return nil
  1415  		}
  1416  
  1417  		timeElapsed := now.Sub(edge.LastUpdate)
  1418  
  1419  		// If it's been longer than RebroadcastInterval since we've
  1420  		// re-broadcasted the channel, add the channel to the set of
  1421  		// edges we need to update.
  1422  		if timeElapsed >= d.cfg.RebroadcastInterval {
  1423  			edgesToUpdate = append(edgesToUpdate, updateTuple{
  1424  				info: info,
  1425  				edge: edge,
  1426  			})
  1427  		}
  1428  
  1429  		return nil
  1430  	})
  1431  	if err != nil && err != channeldb.ErrGraphNoEdgesFound {
  1432  		return fmt.Errorf("unable to retrieve outgoing channels: %v",
  1433  			err)
  1434  	}
  1435  
  1436  	var signedUpdates []lnwire.Message
  1437  	for _, chanToUpdate := range edgesToUpdate {
  1438  		// Re-sign and update the channel on disk and retrieve our
  1439  		// ChannelUpdate to broadcast.
  1440  		chanAnn, chanUpdate, err := d.updateChannel(
  1441  			chanToUpdate.info, chanToUpdate.edge,
  1442  		)
  1443  		if err != nil {
  1444  			return fmt.Errorf("unable to update channel: %v", err)
  1445  		}
  1446  
  1447  		// If we have a valid announcement to transmit, then we'll send
  1448  		// that along with the update.
  1449  		if chanAnn != nil {
  1450  			signedUpdates = append(signedUpdates, chanAnn)
  1451  		}
  1452  
  1453  		signedUpdates = append(signedUpdates, chanUpdate)
  1454  	}
  1455  
  1456  	// If we don't have any public channels, we return as we don't want to
  1457  	// broadcast anything that would reveal our existence.
  1458  	if !havePublicChannels {
  1459  		return nil
  1460  	}
  1461  
  1462  	// We'll also check that our NodeAnnouncement is not too old.
  1463  	currentNodeAnn, err := d.cfg.SelfNodeAnnouncement(false)
  1464  	if err != nil {
  1465  		return fmt.Errorf("unable to get current node announcement: %v",
  1466  			err)
  1467  	}
  1468  
  1469  	timestamp := time.Unix(int64(currentNodeAnn.Timestamp), 0)
  1470  	timeElapsed := now.Sub(timestamp)
  1471  
  1472  	// If it's been longer than the RebroadcastInterval since we last
  1473  	// re-broadcasted the node announcement, refresh it and resend it.
  1474  	nodeAnnStr := ""
  1475  	if timeElapsed >= d.cfg.RebroadcastInterval {
  1476  		newNodeAnn, err := d.cfg.SelfNodeAnnouncement(true)
  1477  		if err != nil {
  1478  			return fmt.Errorf("unable to get refreshed node "+
  1479  				"announcement: %v", err)
  1480  		}
  1481  
  1482  		signedUpdates = append(signedUpdates, &newNodeAnn)
  1483  		nodeAnnStr = " and our refreshed node announcement"
  1484  
  1485  		// Before broadcasting the refreshed node announcement, add it
  1486  		// to our own graph.
  1487  		if err := d.addNode(&newNodeAnn); err != nil {
  1488  			log.Errorf("Unable to add refreshed node announcement "+
  1489  				"to graph: %v", err)
  1490  		}
  1491  	}
  1492  
  1493  	// If we don't have any updates to re-broadcast, then we'll exit
  1494  	// early.
  1495  	if len(signedUpdates) == 0 {
  1496  		return nil
  1497  	}
  1498  
  1499  	log.Infof("Retransmitting %v outgoing channels%v",
  1500  		len(edgesToUpdate), nodeAnnStr)
  1501  
  1502  	// With all the wire announcements properly crafted, we'll broadcast
  1503  	// our known outgoing channels to all our immediate peers.
  1504  	if err := d.cfg.Broadcast(nil, signedUpdates...); err != nil {
  1505  		return fmt.Errorf("unable to re-broadcast channels: %v", err)
  1506  	}
  1507  
  1508  	return nil
  1509  }
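
// NOTE (editor's sketch): the per-channel decision made in
// retransmitStaleAnns reduces to comparing the time since the last signed
// update against the configured RebroadcastInterval. A hypothetical,
// standalone form of that check (the function name is illustrative only):
func exampleNeedsRebroadcast(lastUpdate, now time.Time,
	rebroadcastInterval time.Duration) bool {

	// Re-sign and re-broadcast once the previous update is at least one
	// full interval old.
	return now.Sub(lastUpdate) >= rebroadcastInterval
}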
  1510  
  1511  // processChanPolicyUpdate generates a new set of channel updates for the
  1512  // provided list of edges and updates the backing ChannelGraphSource.
  1513  func (d *AuthenticatedGossiper) processChanPolicyUpdate(
  1514  	edgesToUpdate []EdgeWithInfo) ([]networkMsg, error) {
  1515  
  1516  	var chanUpdates []networkMsg
  1517  	for _, edgeInfo := range edgesToUpdate {
  1518  		// Now that we've collected all the channels we need to update,
  1519  		// we'll re-sign and update the backing ChannelGraphSource, and
  1520  		// retrieve our ChannelUpdate to broadcast.
  1521  		_, chanUpdate, err := d.updateChannel(
  1522  			edgeInfo.Info, edgeInfo.Edge,
  1523  		)
  1524  		if err != nil {
  1525  			return nil, err
  1526  		}
  1527  
  1528  		// We'll avoid broadcasting any updates for private channels to
  1529  		// avoid directly giving away their existence. Instead, we'll
  1530  		// send the update directly to the remote party.
  1531  		if edgeInfo.Info.AuthProof == nil {
  1532  			remotePubKey := remotePubFromChanInfo(
  1533  				edgeInfo.Info, chanUpdate.ChannelFlags,
  1534  			)
  1535  			err := d.reliableSender.sendMessage(
  1536  				chanUpdate, remotePubKey,
  1537  			)
  1538  			if err != nil {
  1539  				log.Errorf("Unable to reliably send %v for "+
  1540  					"channel=%v to peer=%x: %v",
  1541  					chanUpdate.MsgType(),
  1542  					chanUpdate.ShortChannelID,
  1543  					remotePubKey, err)
  1544  			}
  1545  			continue
  1546  		}
  1547  
  1548  		// We set ourselves as the source of this message to indicate
  1549  		// that we shouldn't skip any peers when sending this message.
  1550  		chanUpdates = append(chanUpdates, networkMsg{
  1551  			source: d.selfKey,
  1552  			msg:    chanUpdate,
  1553  		})
  1554  	}
  1555  
  1556  	return chanUpdates, nil
  1557  }
  1558  
  1559  // remotePubFromChanInfo returns the public key of the remote peer given a
  1560  // ChannelEdgeInfo that describes a channel we have with them.
  1561  func remotePubFromChanInfo(chanInfo *channeldb.ChannelEdgeInfo,
  1562  	chanFlags lnwire.ChanUpdateChanFlags) [33]byte {
  1563  
  1564  	var remotePubKey [33]byte
  1565  	switch {
  1566  	case chanFlags&lnwire.ChanUpdateDirection == 0:
  1567  		remotePubKey = chanInfo.NodeKey2Bytes
  1568  	case chanFlags&lnwire.ChanUpdateDirection == 1:
  1569  		remotePubKey = chanInfo.NodeKey1Bytes
  1570  	}
  1571  
  1572  	return remotePubKey
  1573  }
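
// NOTE (editor's sketch): the direction bit of ChannelFlags identifies which
// of the two nodes authored an update, so the *other* node key is the remote
// peer. A hypothetical usage, with made-up values:
//
//	flags := lnwire.ChanUpdateChanFlags(0) // update authored by node 1
//	remote := remotePubFromChanInfo(chanInfo, flags)
//	// remote now holds chanInfo.NodeKey2Bytes, i.e. the counterparty.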
  1574  
  1575  // processRejectedEdge examines a rejected edge to see if we can extract any
  1576  // new announcements from it.  An edge will get rejected if we already added
  1577  // the same edge without AuthProof to the graph. If the received announcement
  1578  // contains a proof, we can add this proof to our edge.  We can end up in this
  1579  // situation in the case where we create a channel, but for some reason fail
  1580  // to receive the remote peer's proof, while the remote peer is able to fully
  1581  // assemble the proof and craft the ChannelAnnouncement.
  1582  func (d *AuthenticatedGossiper) processRejectedEdge(
  1583  	chanAnnMsg *lnwire.ChannelAnnouncement,
  1584  	proof *channeldb.ChannelAuthProof) ([]networkMsg, error) {
  1585  
  1586  	// First, we'll fetch the state of the channel as we know it from the
  1587  	// database.
  1588  	chanInfo, e1, e2, err := d.cfg.Router.GetChannelByID(
  1589  		chanAnnMsg.ShortChannelID,
  1590  	)
  1591  	if err != nil {
  1592  		return nil, err
  1593  	}
  1594  
  1595  	// If the edge is in the graph and already has a proof attached, then
  1596  	// we'll just reject it as normal.
  1597  	if chanInfo.AuthProof != nil {
  1598  		return nil, nil
  1599  	}
  1600  
  1601  	// Otherwise, this means that the edge is within the graph, but it
  1602  	// doesn't yet have a proper proof attached. If we did not receive
  1603  	// the proof such that we now can add it, there's nothing more we
  1604  	// can do.
  1605  	if proof == nil {
  1606  		return nil, nil
  1607  	}
  1608  
  1609  	// We'll then create and validate the new fully assembled
  1610  	// announcement.
  1611  	chanAnn, e1Ann, e2Ann, err := netann.CreateChanAnnouncement(
  1612  		proof, chanInfo, e1, e2,
  1613  	)
  1614  	if err != nil {
  1615  		return nil, err
  1616  	}
  1617  	err = routing.ValidateChannelAnn(chanAnn)
  1618  	if err != nil {
  1619  		err := fmt.Errorf("assembled channel announcement proof "+
  1620  			"for shortChanID=%s isn't valid: %v",
  1621  			chanAnnMsg.ShortChannelID, err)
  1622  		log.Error(err)
  1623  		return nil, err
  1624  	}
  1625  
  1626  	// If everything checks out, then we'll add the fully assembled proof
  1627  	// to the database.
  1628  	err = d.cfg.Router.AddProof(chanAnnMsg.ShortChannelID, proof)
  1629  	if err != nil {
  1630  		err := fmt.Errorf("unable to add proof to shortChanID=%s: %v",
  1631  			chanAnnMsg.ShortChannelID, err)
  1632  		log.Error(err)
  1633  		return nil, err
  1634  	}
  1635  
  1636  	// As we now have a complete channel announcement for this channel,
  1637  	// we'll construct the announcements so they can be broadcast out to
  1638  	// all our peers.
  1639  	announcements := make([]networkMsg, 0, 3)
  1640  	announcements = append(announcements, networkMsg{
  1641  		source: d.selfKey,
  1642  		msg:    chanAnn,
  1643  	})
  1644  	if e1Ann != nil {
  1645  		announcements = append(announcements, networkMsg{
  1646  			source: d.selfKey,
  1647  			msg:    e1Ann,
  1648  		})
  1649  	}
  1650  	if e2Ann != nil {
  1651  		announcements = append(announcements, networkMsg{
  1652  			source: d.selfKey,
  1653  			msg:    e2Ann,
  1654  		})
  1655  
  1656  	}
  1657  
  1658  	return announcements, nil
  1659  }
  1660  
  1661  // addNode processes the given node announcement, and adds it to our channel
  1662  // graph.
  1663  func (d *AuthenticatedGossiper) addNode(msg *lnwire.NodeAnnouncement,
  1664  	op ...batch.SchedulerOption) error {
  1665  
  1666  	if err := routing.ValidateNodeAnn(msg); err != nil {
  1667  		return fmt.Errorf("unable to validate node announcement: %v",
  1668  			err)
  1669  	}
  1670  
  1671  	timestamp := time.Unix(int64(msg.Timestamp), 0)
  1672  	features := lnwire.NewFeatureVector(msg.Features, lnwire.Features)
  1673  	node := &channeldb.LightningNode{
  1674  		HaveNodeAnnouncement: true,
  1675  		LastUpdate:           timestamp,
  1676  		Addresses:            msg.Addresses,
  1677  		PubKeyBytes:          msg.NodeID,
  1678  		Alias:                msg.Alias.String(),
  1679  		AuthSigBytes:         msg.Signature.ToSignatureBytes(),
  1680  		Features:             features,
  1681  		Color:                msg.RGBColor,
  1682  		ExtraOpaqueData:      msg.ExtraOpaqueData,
  1683  	}
  1684  
  1685  	return d.cfg.Router.AddNode(node, op...)
  1686  }
  1687  
  1688  // isPremature decides whether a given network message has a block height+delta
  1689  // value specified in the future. If so, the message will be added to the
  1690  // future message map and be processed once that block height is reached.
  1691  //
  1692  // NOTE: must be used inside a lock.
  1693  func (d *AuthenticatedGossiper) isPremature(chanID lnwire.ShortChannelID,
  1694  	delta uint32, msg *networkMsg) bool {
  1695  	// TODO(roasbeef) make height delta 6
  1696  	//  * or configurable
  1697  
  1698  	msgHeight := chanID.BlockHeight + delta
  1699  
  1700  	// The message height is smaller than or equal to our best known height,
  1701  	// thus the message is mature.
  1702  	if msgHeight <= d.bestHeight {
  1703  		return false
  1704  	}
  1705  
  1706  	// Add the premature message to our future messages which will
  1707  	// be resent once the block height has been reached.
  1708  	//
  1709  	// Init an empty cached message and overwrite it if there are cached
  1710  	// messages found.
  1711  	cachedMsgs := &cachedNetworkMsg{
  1712  		msgs: make([]*networkMsg, 0),
  1713  	}
  1714  
  1715  	result, err := d.futureMsgs.Get(msgHeight)
  1716  	// No error returned means we have old messages cached.
  1717  	if err == nil {
  1718  		cachedMsgs = result.(*cachedNetworkMsg)
  1719  	}
  1720  
  1721  	// Copy the networkMsgs since the old message's err chan will
  1722  	// be consumed.
  1723  	copied := &networkMsg{
  1724  		peer:              msg.peer,
  1725  		source:            msg.source,
  1726  		msg:               msg.msg,
  1727  		optionalMsgFields: msg.optionalMsgFields,
  1728  		isRemote:          msg.isRemote,
  1729  		err:               make(chan error, 1),
  1730  	}
  1731  
  1732  	// Add the network message.
  1733  	cachedMsgs.msgs = append(cachedMsgs.msgs, copied)
  1734  	_, err = d.futureMsgs.Put(msgHeight, cachedMsgs)
  1735  	if err != nil {
  1736  		log.Errorf("Adding future message got error: %v", err)
  1737  	}
  1738  
  1739  	log.Debugf("Network message: %v added to future messages for "+
  1740  		"msgHeight=%d, bestHeight=%d", msg.msg.MsgType(),
  1741  		msgHeight, d.bestHeight)
  1742  
  1743  	return true
  1744  }
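
// NOTE (editor's sketch): stripped of the caching above, the maturity test is
// a single comparison between the height a message advertises (plus any
// required confirmation delta) and our best known block height. A
// hypothetical standalone form:
func exampleIsPremature(msgBlockHeight, delta, bestHeight uint32) bool {
	// The message is premature if it refers to a height we have not yet
	// reached.
	return msgBlockHeight+delta > bestHeight
}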
  1745  
  1746  // processNetworkAnnouncement processes a new network related authenticated
  1747  // channel or node announcement or announcement proof. If the announcement
  1748  // didn't affect the internal state due to either being out of date, invalid,
  1749  // or redundant, then nil is returned. Otherwise, the set of announcements will
  1750  // be returned which should be broadcasted to the rest of the network. The
  1751  // boolean returned indicates whether any dependents of the announcement should
  1752  // attempt to be processed as well.
  1753  func (d *AuthenticatedGossiper) processNetworkAnnouncement(
  1754  	nMsg *networkMsg) ([]networkMsg, bool) {
  1755  
  1756  	log.Debugf("Processing network message: peer=%v, source=%x, msg=%s, "+
  1757  		"is_remote=%v", nMsg.peer, nMsg.source.SerializeCompressed(),
  1758  		nMsg.msg.MsgType(), nMsg.isRemote)
  1759  
  1760  	// If this is a remote update, we set the scheduler option to lazily
  1761  	// add it to the graph.
  1762  	var schedulerOp []batch.SchedulerOption
  1763  	if nMsg.isRemote {
  1764  		schedulerOp = append(schedulerOp, batch.LazyAdd())
  1765  	}
  1766  
  1767  	var announcements []networkMsg
  1768  
  1769  	switch msg := nMsg.msg.(type) {
  1770  
  1771  	// A new node announcement has arrived which either presents new
  1772  	// information about a node in one of the channels we know about, or
  1773  	// updates previously advertised information.
  1774  	case *lnwire.NodeAnnouncement:
  1775  		timestamp := time.Unix(int64(msg.Timestamp), 0)
  1776  
  1777  		// We'll quickly ask the router if it already has a
  1778  		// newer update for this node so we can skip validating
  1779  		// signatures if not required.
  1780  		if d.cfg.Router.IsStaleNode(msg.NodeID, timestamp) {
  1781  			log.Debugf("Skipped processing stale node: %x",
  1782  				msg.NodeID)
  1783  			nMsg.err <- nil
  1784  			return nil, true
  1785  		}
  1786  
  1787  		if err := d.addNode(msg, schedulerOp...); err != nil {
  1788  			log.Debugf("Adding node: %x got error: %v",
  1789  				msg.NodeID, err)
  1790  
  1791  			if !routing.IsError(
  1792  				err,
  1793  				routing.ErrOutdated,
  1794  				routing.ErrIgnored,
  1795  				routing.ErrVBarrierShuttingDown,
  1796  			) {
  1797  				log.Error(err)
  1798  			}
  1799  
  1800  			nMsg.err <- err
  1801  			return nil, false
  1802  		}
  1803  
  1804  		// In order to ensure we don't leak unadvertised nodes, we'll
  1805  		// make a quick check to ensure this node intends to publicly
  1806  		// advertise itself to the network.
  1807  		isPublic, err := d.cfg.Router.IsPublicNode(msg.NodeID)
  1808  		if err != nil {
  1809  			log.Errorf("Unable to determine if node %x is "+
  1810  				"advertised: %v", msg.NodeID, err)
  1811  			nMsg.err <- err
  1812  			return nil, false
  1813  		}
  1814  
  1815  		// If it does, we'll add their announcement to our batch so that
  1816  		// it can be broadcast to the rest of our peers.
  1817  		if isPublic {
  1818  			announcements = append(announcements, networkMsg{
  1819  				peer:   nMsg.peer,
  1820  				source: nMsg.source,
  1821  				msg:    msg,
  1822  			})
  1823  		} else {
  1824  			log.Tracef("Skipping broadcasting node announcement "+
  1825  				"for %x due to being unadvertised", msg.NodeID)
  1826  		}
  1827  
  1828  		if nMsg.isRemote {
  1829  			d.updateGossiperMsgTS(nMsg.peer.PubKey(), timestamp)
  1830  		}
  1831  
  1832  		nMsg.err <- nil
  1833  		// TODO(roasbeef): get rid of the above
  1834  		return announcements, true
  1835  
  1836  	// A new channel announcement has arrived, this indicates the
  1837  	// *creation* of a new channel within the network. This only advertises
  1838  	// the existence of a channel and not yet the routing policies in
  1839  	// either direction of the channel.
  1840  	case *lnwire.ChannelAnnouncement:
  1841  		// We'll ignore any channel announcements that target any chain
  1842  		// other than the set of chains we know of.
  1843  		if !bytes.Equal(msg.ChainHash[:], d.cfg.ChainHash[:]) {
  1844  			err := fmt.Errorf("ignoring ChannelAnnouncement from "+
  1845  				"chain=%v, gossiper on chain=%v", msg.ChainHash,
  1846  				d.cfg.ChainHash)
  1847  			log.Errorf(err.Error())
  1848  
  1849  			key := newRejectCacheKey(
  1850  				msg.ShortChannelID.ToUint64(),
  1851  				sourceToPub(nMsg.source),
  1852  			)
  1853  			_, _ = d.recentRejects.Put(key, &cachedReject{})
  1854  
  1855  			nMsg.err <- err
  1856  			return nil, false
  1857  		}
  1858  
  1859  		// If the advertised inclusionary block is beyond our knowledge
  1860  		// of the chain tip, then we'll ignore it for now.
  1861  		d.Lock()
  1862  		if nMsg.isRemote && d.isPremature(msg.ShortChannelID, 0, nMsg) {
  1863  			log.Warnf("Announcement for chan_id=(%v), is "+
  1864  				"premature: advertises height %v, only "+
  1865  				"height %v is known",
  1866  				msg.ShortChannelID.ToUint64(),
  1867  				msg.ShortChannelID.BlockHeight,
  1868  				d.bestHeight)
  1869  			d.Unlock()
  1870  			nMsg.err <- nil
  1871  			return nil, false
  1872  		}
  1873  		d.Unlock()
  1874  
  1875  		// At this point, we'll now ask the router if this is a
  1876  		// zombie/known edge. If so we can skip all the processing
  1877  		// below.
  1878  		if d.cfg.Router.IsKnownEdge(msg.ShortChannelID) {
  1879  			nMsg.err <- nil
  1880  			return nil, true
  1881  		}
  1882  
  1883  		// If this is a remote channel announcement, then we'll validate
  1884  		// all the signatures within the proof as it should be well
  1885  		// formed.
  1886  		var proof *channeldb.ChannelAuthProof
  1887  		if nMsg.isRemote {
  1888  			if err := routing.ValidateChannelAnn(msg); err != nil {
  1889  				err := fmt.Errorf("unable to validate "+
  1890  					"announcement: %v", err)
  1891  
  1892  				key := newRejectCacheKey(
  1893  					msg.ShortChannelID.ToUint64(),
  1894  					sourceToPub(nMsg.source),
  1895  				)
  1896  				_, _ = d.recentRejects.Put(key, &cachedReject{})
  1897  
  1898  				log.Error(err)
  1899  				nMsg.err <- err
  1900  				return nil, false
  1901  			}
  1902  
  1903  			// If the proof checks out, then we'll save the proof
  1904  			// itself to the database so we can fetch it later when
  1905  			// gossiping with other nodes.
  1906  			proof = &channeldb.ChannelAuthProof{
  1907  				NodeSig1Bytes:   msg.NodeSig1.ToSignatureBytes(),
  1908  				NodeSig2Bytes:   msg.NodeSig2.ToSignatureBytes(),
  1909  				DecredSig1Bytes: msg.DecredSig1.ToSignatureBytes(),
  1910  				DecredSig2Bytes: msg.DecredSig2.ToSignatureBytes(),
  1911  			}
  1912  		}
  1913  
  1914  		// With the proof validated (if necessary), we can now store it
  1915  		// within the database for our path finding and syncing needs.
  1916  		var featureBuf bytes.Buffer
  1917  		if err := msg.Features.Encode(&featureBuf); err != nil {
  1918  			log.Errorf("unable to encode features: %v", err)
  1919  			nMsg.err <- err
  1920  			return nil, false
  1921  		}
  1922  
  1923  		edge := &channeldb.ChannelEdgeInfo{
  1924  			ChannelID:       msg.ShortChannelID.ToUint64(),
  1925  			ChainHash:       msg.ChainHash,
  1926  			NodeKey1Bytes:   msg.NodeID1,
  1927  			NodeKey2Bytes:   msg.NodeID2,
  1928  			DecredKey1Bytes: msg.DecredKey1,
  1929  			DecredKey2Bytes: msg.DecredKey2,
  1930  			AuthProof:       proof,
  1931  			Features:        featureBuf.Bytes(),
  1932  			ExtraOpaqueData: msg.ExtraOpaqueData,
  1933  		}
  1934  
  1935  		// If there were any optional message fields provided, we'll
  1936  		// include them in its serialized disk representation now.
  1937  		if nMsg.optionalMsgFields != nil {
  1938  			if nMsg.optionalMsgFields.capacity != nil {
  1939  				edge.Capacity = *nMsg.optionalMsgFields.capacity
  1940  			}
  1941  			if nMsg.optionalMsgFields.channelPoint != nil {
  1942  				edge.ChannelPoint = *nMsg.optionalMsgFields.channelPoint
  1943  			}
  1944  		}
  1945  
  1946  		// We will add the edge to the channel router. If the nodes
  1947  		// present in this channel are not present in the database, a
  1948  		// partial node will be added to represent each node while we
  1949  		// wait for a node announcement.
  1950  		//
  1951  		// Before we add the edge to the database, we obtain
  1952  		// the mutex for this channel ID. We do this to ensure
  1953  		// no other goroutine has read the database and is now
  1954  		// making decisions based on this DB state, before it
  1955  		// writes to the DB.
  1956  		d.channelMtx.Lock(msg.ShortChannelID.ToUint64())
  1957  		err := d.cfg.Router.AddEdge(edge, schedulerOp...)
  1958  		if err != nil {
  1959  			defer d.channelMtx.Unlock(msg.ShortChannelID.ToUint64())
  1960  
  1961  			// If the edge was rejected due to already being known,
  1962  			// then it may be the case that this new message has a
  1963  			// fresh channel proof, so we'll check.
  1964  			if routing.IsError(err, routing.ErrIgnored) {
  1965  				// Attempt to process the rejected message to
  1966  				// see if we get any new announcements.
  1967  				anns, rErr := d.processRejectedEdge(msg, proof)
  1968  				if rErr != nil {
  1969  
  1970  					key := newRejectCacheKey(
  1971  						msg.ShortChannelID.ToUint64(),
  1972  						sourceToPub(nMsg.source),
  1973  					)
  1974  					_, _ = d.recentRejects.Put(key, &cachedReject{})
  1975  
  1976  					nMsg.err <- rErr
  1977  					return nil, false
  1978  				}
  1979  
  1980  				// If while processing this rejected edge, we
  1981  				// realized there's a set of announcements we
  1982  				// could extract, then we'll return those
  1983  				// directly.
  1984  				if len(anns) != 0 {
  1985  					nMsg.err <- nil
  1986  					return anns, true
  1987  				}
  1988  
  1989  				// Otherwise, this is just a regular rejected
  1990  				// edge.
  1991  				log.Debugf("Router rejected channel "+
  1992  					"edge: %v", err)
  1993  			} else {
  1994  				log.Debugf("Router rejected channel "+
  1995  					"edge: %v", err)
  1996  
  1997  				key := newRejectCacheKey(
  1998  					msg.ShortChannelID.ToUint64(),
  1999  					sourceToPub(nMsg.source),
  2000  				)
  2001  				_, _ = d.recentRejects.Put(key, &cachedReject{})
  2002  			}
  2003  
  2004  			nMsg.err <- err
  2005  			return nil, false
  2006  		}
  2007  
  2008  		// If err is nil, release the lock immediately.
  2009  		d.channelMtx.Unlock(msg.ShortChannelID.ToUint64())
  2010  
  2011  		// If we earlier received any ChannelUpdates for this channel,
  2012  		// we can now process them, as the channel is added to the
  2013  		// graph.
  2014  		shortChanID := msg.ShortChannelID
  2015  		var channelUpdates []*networkMsg
  2016  
  2017  		earlyChanUpdates, err := d.prematureChannelUpdates.Get(shortChanID)
  2018  		if err == nil {
  2019  			// There was actually an entry in the map, so we'll
  2020  			// accumulate it. We don't worry about deletion, since
  2021  			// it'll eventually fall out anyway.
  2022  			chanMsgs := earlyChanUpdates.(*cachedNetworkMsg)
  2023  			channelUpdates = append(channelUpdates, chanMsgs.msgs...)
  2024  		}
  2025  
  2026  		// Launch a new goroutine to handle each ChannelUpdate; this is to
  2027  		// ensure we don't block here, as we can handle only one
  2028  		// announcement at a time.
  2029  		for _, cu := range channelUpdates {
  2030  			d.wg.Add(1)
  2031  			go func(nMsg *networkMsg) {
  2032  				defer d.wg.Done()
  2033  
  2034  				switch msg := nMsg.msg.(type) {
  2035  
  2036  				// Reprocess the message, making sure we return
  2037  				// an error to the original caller in case the
  2038  				// gossiper shuts down.
  2039  				case *lnwire.ChannelUpdate:
  2040  					log.Debugf("Reprocessing"+
  2041  						" ChannelUpdate for "+
  2042  						"shortChanID=%s",
  2043  						msg.ShortChannelID)
  2044  
  2045  					select {
  2046  					case d.networkMsgs <- nMsg:
  2047  					case <-d.quit:
  2048  						nMsg.err <- ErrGossiperShuttingDown
  2049  					}
  2050  
  2051  				// We don't expect any other message type than
  2052  				// ChannelUpdate to be in this map.
  2053  				default:
  2054  					log.Errorf("Unsupported message type "+
  2055  						"found among ChannelUpdates: "+
  2056  						"%T", msg)
  2057  				}
  2058  			}(cu)
  2059  		}
  2060  
  2061  		// The channel announcement was successfully processed and now
  2062  		// it might be broadcast to other connected nodes if it was an
  2063  		// announcement with a proof (remote).
  2064  		if proof != nil {
  2065  			announcements = append(announcements, networkMsg{
  2066  				peer:   nMsg.peer,
  2067  				source: nMsg.source,
  2068  				msg:    msg,
  2069  			})
  2070  		}
  2071  
  2072  		nMsg.err <- nil
  2073  		return announcements, true
  2074  
  2075  	// A new authenticated channel edge update has arrived. This indicates
  2076  	// that the directional information for an already known channel has
  2077  	// been updated.
  2078  	case *lnwire.ChannelUpdate:
  2079  		// We'll ignore any channel announcements that target any chain
  2080  		// other than the set of chains we know of.
  2081  		if !bytes.Equal(msg.ChainHash[:], d.cfg.ChainHash[:]) {
  2082  			err := fmt.Errorf("ignoring ChannelUpdate from "+
  2083  				"chain=%v, gossiper on chain=%v", msg.ChainHash,
  2084  				d.cfg.ChainHash)
  2085  			log.Errorf(err.Error())
  2086  
  2087  			key := newRejectCacheKey(
  2088  				msg.ShortChannelID.ToUint64(),
  2089  				sourceToPub(nMsg.source),
  2090  			)
  2091  			_, _ = d.recentRejects.Put(key, &cachedReject{})
  2092  
  2093  			nMsg.err <- err
  2094  			return nil, false
  2095  		}
  2096  
  2097  		blockHeight := msg.ShortChannelID.BlockHeight
  2098  		shortChanID := msg.ShortChannelID
  2099  
  2100  		// If the advertised inclusionary block is beyond our knowledge
  2101  		// of the chain tip, then we'll put the announcement in limbo
  2102  		// to be fully verified once we advance forward in the chain.
  2103  		d.Lock()
  2104  		if nMsg.isRemote && d.isPremature(msg.ShortChannelID, 0, nMsg) {
  2105  			log.Warnf("Update announcement for "+
  2106  				"short_chan_id(%s), is premature: advertises "+
  2107  				"height %v, only height %v is known",
  2108  				shortChanID, blockHeight,
  2109  				d.bestHeight)
  2110  			d.Unlock()
  2111  			nMsg.err <- nil
  2112  			return nil, false
  2113  		}
  2114  		d.Unlock()
  2115  
  2116  		// Before we perform any of the expensive checks below, we'll
  2117  		// check whether this update is stale or is for a zombie
  2118  		// channel in order to quickly reject it.
  2119  		timestamp := time.Unix(int64(msg.Timestamp), 0)
  2120  		if d.cfg.Router.IsStaleEdgePolicy(
  2121  			msg.ShortChannelID, timestamp, msg.ChannelFlags,
  2122  		) {
  2123  
  2124  			log.Debugf("Ignored stale edge policy: peer=%v, "+
  2125  				"source=%x, msg=%s, is_remote=%v, channel=%s, "+
  2126  				"ts=%s", nMsg.peer,
  2127  				nMsg.source.SerializeCompressed(),
  2128  				nMsg.msg.MsgType(), nMsg.isRemote,
  2129  				msg.ShortChannelID,
  2130  				timestamp.Format(time.RFC3339))
  2131  
  2132  			nMsg.err <- nil
  2133  			return nil, true
  2134  		}
  2135  
  2136  		// Get the node pub key since we don't have it in the channel
  2137  		// update announcement message. We'll need this to properly
  2138  		// verify the message signature.
  2139  		//
  2140  		// We make sure to obtain the mutex for this channel ID
  2141  		// before we access the database. This ensures the state
  2142  		// we read from the database has not changed between this
  2143  		// point and when we call UpdateEdge() later.
  2144  		d.channelMtx.Lock(msg.ShortChannelID.ToUint64())
  2145  		defer d.channelMtx.Unlock(msg.ShortChannelID.ToUint64())
  2146  		chanInfo, edge1, edge2, err := d.cfg.Router.GetChannelByID(msg.ShortChannelID)
  2147  		switch err {
  2148  		// No error, break.
  2149  		case nil:
  2150  			break
  2151  
  2152  		case channeldb.ErrZombieEdge:
  2153  			err = d.processZombieUpdate(chanInfo, msg)
  2154  			if err != nil {
  2155  				log.Debug(err)
  2156  				nMsg.err <- err
  2157  				return nil, false
  2158  			}
  2159  
  2160  			// We'll fallthrough to ensure we stash the update until
  2161  			// we receive its corresponding ChannelAnnouncement.
  2162  			// This is needed to ensure the edge exists in the graph
  2163  			// before applying the update.
  2164  			fallthrough
  2165  		case channeldb.ErrGraphNotFound:
  2166  			fallthrough
  2167  		case channeldb.ErrGraphNoEdgesFound:
  2168  			fallthrough
  2169  		case channeldb.ErrEdgeNotFound:
  2170  			// If the edge corresponding to this ChannelUpdate was
  2171  			// not found in the graph, this might be a channel in
  2172  			// the process of being opened, and we haven't processed
  2173  			// our own ChannelAnnouncement yet, hence it is not
  2174  			// found in the graph. This usually gets resolved after
  2175  			// the channel proofs are exchanged and the channel is
  2176  			// broadcasted to the rest of the network, but in case
  2177  			// this is a private channel this won't ever happen.
  2178  			// This can also happen in the case of a zombie channel
  2179  			// with a fresh update for which we don't have a
  2180  			// ChannelAnnouncement, since we reject them. Because
  2181  			// of this, we temporarily add it to a map, and
  2182  			// reprocess it after our own ChannelAnnouncement has
  2183  			// been processed.
  2184  			earlyMsgs, err := d.prematureChannelUpdates.Get(
  2185  				shortChanID,
  2186  			)
  2187  			switch {
  2188  			// Nothing in the cache yet, we can just directly
  2189  			// insert this element.
  2190  			case err == cache.ErrElementNotFound:
  2191  				_, _ = d.prematureChannelUpdates.Put(
  2192  					shortChanID, &cachedNetworkMsg{
  2193  						msgs: []*networkMsg{nMsg},
  2194  					})
  2195  
  2196  			// There's already something in the cache, so we'll
  2197  			// combine the set of messages into a single value.
  2198  			default:
  2199  				msgs := earlyMsgs.(*cachedNetworkMsg).msgs
  2200  				msgs = append(msgs, nMsg)
  2201  				_, _ = d.prematureChannelUpdates.Put(
  2202  					shortChanID, &cachedNetworkMsg{
  2203  						msgs: msgs,
  2204  					})
  2205  			}
  2206  
  2207  			log.Debugf("Got ChannelUpdate for edge not found in "+
  2208  				"graph(shortChanID=%s), saving for "+
  2209  				"reprocessing later", shortChanID)
  2210  
  2211  			// NOTE: We don't return anything on the error channel
  2212  			// for this message, as we expect that will be done when
  2213  			// this ChannelUpdate is later reprocessed.
  2214  			return nil, false
  2215  
  2216  		default:
  2217  			err := fmt.Errorf("unable to validate channel update "+
  2218  				"short_chan_id=%s: %v", msg.ShortChannelID, err)
  2219  			log.Error(err)
  2220  			nMsg.err <- err
  2221  
  2222  			key := newRejectCacheKey(
  2223  				msg.ShortChannelID.ToUint64(),
  2224  				sourceToPub(nMsg.source),
  2225  			)
  2226  			_, _ = d.recentRejects.Put(key, &cachedReject{})
  2227  
  2228  			return nil, false
  2229  		}
  2230  
  2231  		// The least-significant bit in the flag on the channel update
  2232  		// announcement tells us "which" side of the channel's directed
  2233  		// edge is being updated.
  2234  		var (
  2235  			pubKey       *secp256k1.PublicKey
  2236  			edgeToUpdate *channeldb.ChannelEdgePolicy
  2237  		)
  2238  		direction := msg.ChannelFlags & lnwire.ChanUpdateDirection
  2239  		switch direction {
  2240  		case 0:
  2241  			pubKey, _ = chanInfo.NodeKey1()
  2242  			edgeToUpdate = edge1
  2243  		case 1:
  2244  			pubKey, _ = chanInfo.NodeKey2()
  2245  			edgeToUpdate = edge2
  2246  		}
  2247  
  2248  		// If we have a previous version of the edge being updated,
  2249  		// we'll want to rate limit its updates to prevent spam
  2250  		// throughout the network.
  2251  		if nMsg.isRemote && edgeToUpdate != nil {
  2252  			// If it's a keep-alive update, we'll only propagate one
  2253  			// if it's been a day since the previous. This follows
  2254  			// our own heuristic of sending keep-alive updates after
  2255  			// the same duration (see retransmitStaleAnns).
  2256  			timeSinceLastUpdate := timestamp.Sub(edgeToUpdate.LastUpdate)
  2257  			if IsKeepAliveUpdate(msg, edgeToUpdate) {
  2258  				if timeSinceLastUpdate < d.cfg.RebroadcastInterval {
  2259  					log.Debugf("Ignoring keep alive update "+
  2260  						"not within %v period for "+
  2261  						"channel %v",
  2262  						d.cfg.RebroadcastInterval,
  2263  						shortChanID)
  2264  					nMsg.err <- nil
  2265  					return nil, false
  2266  				}
  2267  			} else {
  2268  				// If it's not, we'll allow an update per minute
  2269  				// with a maximum burst of 10. If we haven't
  2270  				// seen an update for this channel before, we'll
  2271  				// need to initialize a rate limiter for each
  2272  				// direction.
  2273  				d.Lock()
  2274  				rateLimiters, ok := d.chanUpdateRateLimiter[shortChanID.ToUint64()]
  2275  				if !ok {
  2276  					r := rate.Every(d.cfg.ChannelUpdateInterval)
  2277  					b := d.cfg.MaxChannelUpdateBurst
  2278  					rateLimiters = [2]*rate.Limiter{
  2279  						rate.NewLimiter(r, b),
  2280  						rate.NewLimiter(r, b),
  2281  					}
  2282  					d.chanUpdateRateLimiter[shortChanID.ToUint64()] = rateLimiters
  2283  				}
  2284  				d.Unlock()
  2285  
  2286  				if !rateLimiters[direction].Allow() {
  2287  					log.Debugf("Rate limiting update for "+
  2288  						"channel %v from direction %x",
  2289  						shortChanID,
  2290  						pubKey.SerializeCompressed())
  2291  					nMsg.err <- nil
  2292  					return nil, false
  2293  				}
  2294  			}
  2295  		}
  2296  
  2297  		// Validate the channel update with the expected public key and
  2298  		// channel capacity. In the case of an invalid channel update, we'll
  2299  		// return an error to the caller and exit early.
  2300  		err = routing.ValidateChannelUpdateAnn(pubKey, chanInfo.Capacity, msg)
  2301  		if err != nil {
  2302  			rErr := fmt.Errorf("unable to validate channel "+
  2303  				"update announcement for short_chan_id=%s: %v",
  2304  				spew.Sdump(msg.ShortChannelID), err)
  2305  
  2306  			log.Error(rErr)
  2307  			nMsg.err <- rErr
  2308  			return nil, false
  2309  		}
  2310  
  2311  		update := &channeldb.ChannelEdgePolicy{
  2312  			SigBytes:                  msg.Signature.ToSignatureBytes(),
  2313  			ChannelID:                 shortChanID.ToUint64(),
  2314  			LastUpdate:                timestamp,
  2315  			MessageFlags:              msg.MessageFlags,
  2316  			ChannelFlags:              msg.ChannelFlags,
  2317  			TimeLockDelta:             msg.TimeLockDelta,
  2318  			MinHTLC:                   msg.HtlcMinimumMAtoms,
  2319  			MaxHTLC:                   msg.HtlcMaximumMAtoms,
  2320  			FeeBaseMAtoms:             lnwire.MilliAtom(msg.BaseFee),
  2321  			FeeProportionalMillionths: lnwire.MilliAtom(msg.FeeRate),
  2322  			ExtraOpaqueData:           msg.ExtraOpaqueData,
  2323  		}
  2324  
  2325  		if err := d.cfg.Router.UpdateEdge(update, schedulerOp...); err != nil {
  2326  			if routing.IsError(
  2327  				err, routing.ErrOutdated,
  2328  				routing.ErrIgnored,
  2329  				routing.ErrVBarrierShuttingDown,
  2330  			) {
  2331  				log.Debug(err)
  2332  
  2333  			} else {
  2334  				key := newRejectCacheKey(
  2335  					msg.ShortChannelID.ToUint64(),
  2336  					sourceToPub(nMsg.source),
  2337  				)
  2338  				_, _ = d.recentRejects.Put(key, &cachedReject{})
  2339  
  2340  				log.Error(err)
  2341  			}
  2342  
  2343  			nMsg.err <- err
  2344  			return nil, false
  2345  		}
  2346  
  2347  		// If this is a local ChannelUpdate without an AuthProof, it
  2348  		// means it is an update to a channel that is not (yet)
  2349  		// supposed to be announced to the greater network. However,
  2350  		// our channel counter party will need to be given the update,
  2351  		// our channel counterparty will need to be given the update,
  2352  		if !nMsg.isRemote && chanInfo.AuthProof == nil {
  2353  			// Get our peer's public key.
  2354  			remotePubKey := remotePubFromChanInfo(
  2355  				chanInfo, msg.ChannelFlags,
  2356  			)
  2357  
  2358  			log.Debugf("The message %v has no AuthProof, sending "+
  2359  				"the update to remote peer %x",
  2360  				msg.MsgType(), remotePubKey)
  2361  
  2362  			// Now, we'll attempt to send the channel update message
  2363  			// reliably to the remote peer in the background, so
  2364  			// that we don't block if the peer happens to be offline
  2365  			// at the moment.
  2366  			err := d.reliableSender.sendMessage(msg, remotePubKey)
  2367  			if err != nil {
  2368  				err := fmt.Errorf("unable to reliably send %v "+
  2369  					"for channel=%v to peer=%x: %v",
  2370  					msg.MsgType(), msg.ShortChannelID,
  2371  					remotePubKey, err)
  2372  				nMsg.err <- err
  2373  				return nil, false
  2374  			}
  2375  		}
  2376  
  2377  		// Channel update announcement was successfully processed and
  2378  		// now it can be broadcast to the rest of the network. However,
  2379  		// we'll only broadcast the channel update announcement if it
  2380  		// has an attached authentication proof.
  2381  		if chanInfo.AuthProof != nil {
  2382  			announcements = append(announcements, networkMsg{
  2383  				peer:   nMsg.peer,
  2384  				source: nMsg.source,
  2385  				msg:    msg,
  2386  			})
  2387  		}
  2388  
  2389  		if nMsg.isRemote {
  2390  			d.updateGossiperMsgTS(nMsg.peer.PubKey(), timestamp)
  2391  		}
  2392  
  2393  		nMsg.err <- nil
  2394  		return announcements, true
  2395  
  2396  	// A new signature announcement has been received. This indicates
  2397  	// willingness of nodes involved in the funding of a channel to
  2398  	// announce this new channel to the rest of the world.
  2399  	case *lnwire.AnnounceSignatures:
  2400  		needBlockHeight := msg.ShortChannelID.BlockHeight +
  2401  			d.cfg.ProofMatureDelta
  2402  		shortChanID := msg.ShortChannelID
  2403  
  2404  		prefix := "local"
  2405  		if nMsg.isRemote {
  2406  			prefix = "remote"
  2407  		}
  2408  
  2409  		log.Infof("Received new %v channel announcement for %v", prefix,
  2410  			shortChanID)
  2411  
  2412  		// By the specification, channel announcement proofs should be
  2413  		// sent after some number of confirmations after the channel was
  2414  		// registered in the decred blockchain. Therefore, we check if the
  2415  		// proof is premature.
  2416  		d.Lock()
  2417  		premature := d.isPremature(
  2418  			msg.ShortChannelID, d.cfg.ProofMatureDelta, nMsg,
  2419  		)
  2420  		if premature {
  2421  			log.Warnf("Premature proof announcement, current "+
  2422  				"block height lower than needed: %v < %v",
  2423  				d.bestHeight, needBlockHeight)
  2424  			d.Unlock()
  2425  			nMsg.err <- nil
  2426  			return nil, false
  2427  		}
  2428  		d.Unlock()
  2429  
  2430  		// Ensure that we know of a channel with the target channel ID
  2431  		// before proceeding further.
  2432  		//
  2433  		// We must acquire the mutex for this channel ID before getting
  2434  		// the channel from the database, to ensure what we read does
  2435  		// not change before we call AddProof() later.
  2436  		d.channelMtx.Lock(msg.ShortChannelID.ToUint64())
  2437  		defer d.channelMtx.Unlock(msg.ShortChannelID.ToUint64())
  2438  
  2439  		chanInfo, e1, e2, err := d.cfg.Router.GetChannelByID(
  2440  			shortChanID)
  2441  		if err != nil {
  2442  			// TODO(andrew.shvv) this is dangerous because remote
  2443  			// node might rewrite the waiting proof.
  2444  			proof := channeldb.NewWaitingProof(nMsg.isRemote, msg)
  2445  			err := d.cfg.WaitingProofStore.Add(proof)
  2446  			if err != nil {
  2447  				err := fmt.Errorf("unable to store "+
  2448  					"the proof for short_chan_id=%s: %v",
  2449  					shortChanID, err)
  2450  				log.Error(err)
  2451  				nMsg.err <- err
  2452  				return nil, false
  2453  			}
  2454  
  2455  			log.Infof("Orphan %v proof announcement with "+
  2456  				"short_chan_id=%s, adding "+
  2457  				"to waiting batch", prefix, shortChanID)
  2458  			nMsg.err <- nil
  2459  			return nil, false
  2460  		}
  2461  
  2462  		nodeID := nMsg.source.SerializeCompressed()
  2463  		isFirstNode := bytes.Equal(nodeID, chanInfo.NodeKey1Bytes[:])
  2464  		isSecondNode := bytes.Equal(nodeID, chanInfo.NodeKey2Bytes[:])
  2465  
  2466  		// Ensure that the channel that was retrieved belongs to the peer
  2467  		// which sent the proof announcement.
  2468  		if !(isFirstNode || isSecondNode) {
  2469  			err := fmt.Errorf("channel that was retrieved does "+
  2470  				"not belong to the peer which sent the proof, "+
  2471  				"short_chan_id=%s", shortChanID)
  2472  			log.Error(err)
  2473  			nMsg.err <- err
  2474  			return nil, false
  2475  		}
  2476  
  2477  		// If the proof was sent by a local sub-system, then we'll
  2478  		// send the announcement signature to the remote node
  2479  		// so they can also reconstruct the full channel
  2480  		// announcement.
  2481  		if !nMsg.isRemote {
  2482  			var remotePubKey [33]byte
  2483  			if isFirstNode {
  2484  				remotePubKey = chanInfo.NodeKey2Bytes
  2485  			} else {
  2486  				remotePubKey = chanInfo.NodeKey1Bytes
  2487  			}
  2488  			// Since the remote peer might not be online
  2489  			// we'll call a method that will attempt to
  2490  			// deliver the proof when it comes online.
  2491  			err := d.reliableSender.sendMessage(msg, remotePubKey)
  2492  			if err != nil {
  2493  				err := fmt.Errorf("unable to reliably send %v "+
  2494  					"for channel=%v to peer=%x: %v",
  2495  					msg.MsgType(), shortChanID,
  2496  					remotePubKey, err)
  2497  				nMsg.err <- err
  2498  				return nil, false
  2499  			}
  2500  		}
  2501  
  2502  		// Check if we already have the full proof for this channel.
  2503  		if chanInfo.AuthProof != nil {
  2504  			// If we already have the fully assembled proof, then
  2505  			// the peer sending us their proof has probably not
  2506  			// received our local proof yet. So be kind and send
  2507  			// them the full proof.
  2508  			if nMsg.isRemote {
  2509  				peerID := nMsg.source.SerializeCompressed()
  2510  				log.Debugf("Got AnnounceSignatures for " +
  2511  					"channel with full proof.")
  2512  
  2513  				d.wg.Add(1)
  2514  				go func() {
  2515  					defer d.wg.Done()
  2516  					log.Debugf("Received half proof for "+
  2517  						"channel %v with existing "+
  2518  						"full proof. Sending full "+
  2519  						"proof to peer=%x",
  2520  						msg.ChannelID,
  2521  						peerID)
  2522  
  2523  					chanAnn, _, _, err := netann.CreateChanAnnouncement(
  2524  						chanInfo.AuthProof, chanInfo,
  2525  						e1, e2,
  2526  					)
  2527  					if err != nil {
  2528  						log.Errorf("unable to gen "+
  2529  							"ann: %v", err)
  2530  						return
  2531  					}
  2532  					err = nMsg.peer.SendMessage(
  2533  						false, chanAnn,
  2534  					)
  2535  					if err != nil {
  2536  						log.Errorf("Failed sending "+
  2537  							"full proof to "+
  2538  							"peer=%x: %v",
  2539  							peerID, err)
  2540  						return
  2541  					}
  2542  					log.Debugf("Full proof sent to peer=%x"+
  2543  						" for chanID=%v", peerID,
  2544  						msg.ChannelID)
  2545  				}()
  2546  			}
  2547  
  2548  			log.Debugf("Already have proof for channel "+
  2549  				"with chanID=%v", msg.ChannelID)
  2550  			nMsg.err <- nil
  2551  			return nil, true
  2552  		}
  2553  
  2554  		// Check that we received the opposite proof. If so, then we're
  2555  		// now able to construct the full proof, and create the channel
  2556  		// announcement. If we didn't receive the opposite half of the
  2557  		// proof, then we should store this one and wait for the
  2558  		// opposite half to be received.
  2559  		proof := channeldb.NewWaitingProof(nMsg.isRemote, msg)
  2560  		oppositeProof, err := d.cfg.WaitingProofStore.Get(
  2561  			proof.OppositeKey(),
  2562  		)
  2563  		if err != nil && err != channeldb.ErrWaitingProofNotFound {
  2564  			err := fmt.Errorf("unable to get "+
  2565  				"the opposite proof for short_chan_id=%s: %v",
  2566  				shortChanID, err)
  2567  			log.Error(err)
  2568  			nMsg.err <- err
  2569  			return nil, false
  2570  		}
  2571  
  2572  		if err == channeldb.ErrWaitingProofNotFound {
  2573  			err := d.cfg.WaitingProofStore.Add(proof)
  2574  			if err != nil {
  2575  				err := fmt.Errorf("unable to store "+
  2576  					"the proof for short_chan_id=%s: %v",
  2577  					shortChanID, err)
  2578  				log.Error(err)
  2579  				nMsg.err <- err
  2580  				return nil, false
  2581  			}
  2582  
  2583  			log.Infof("1/2 of channel ann proof received for "+
  2584  				"short_chan_id=%s, waiting for other half",
  2585  				shortChanID)
  2586  
  2587  			nMsg.err <- nil
  2588  			return nil, false
  2589  		}
  2590  
  2591  		// We now have both halves of the channel announcement proof,
  2592  		// so we'll reconstruct the initial announcement so we can
  2593  		// validate it shortly below.
  2594  		var dbProof channeldb.ChannelAuthProof
  2595  		if isFirstNode {
  2596  			dbProof.NodeSig1Bytes = msg.NodeSignature.ToSignatureBytes()
  2597  			dbProof.NodeSig2Bytes = oppositeProof.NodeSignature.ToSignatureBytes()
  2598  			dbProof.DecredSig1Bytes = msg.DecredSignature.ToSignatureBytes()
  2599  			dbProof.DecredSig2Bytes = oppositeProof.DecredSignature.ToSignatureBytes()
  2600  		} else {
  2601  			dbProof.NodeSig1Bytes = oppositeProof.NodeSignature.ToSignatureBytes()
  2602  			dbProof.NodeSig2Bytes = msg.NodeSignature.ToSignatureBytes()
  2603  			dbProof.DecredSig1Bytes = oppositeProof.DecredSignature.ToSignatureBytes()
  2604  			dbProof.DecredSig2Bytes = msg.DecredSignature.ToSignatureBytes()
  2605  		}
  2606  		chanAnn, e1Ann, e2Ann, err := netann.CreateChanAnnouncement(
  2607  			&dbProof, chanInfo, e1, e2,
  2608  		)
  2609  		if err != nil {
  2610  			log.Error(err)
  2611  			nMsg.err <- err
  2612  			return nil, false
  2613  		}
  2614  
  2615  		// With all the necessary components assembled, validate the
  2616  		// full channel announcement proof.
  2617  		if err := routing.ValidateChannelAnn(chanAnn); err != nil {
  2618  			err := fmt.Errorf("channel announcement proof "+
  2619  				"for short_chan_id=%s isn't valid: %v",
  2620  				shortChanID, err)
  2621  
  2622  			log.Error(err)
  2623  			nMsg.err <- err
  2624  			return nil, false
  2625  		}
  2626  
  2627  		// If the channel was returned by the router, it means that the
  2628  		// existence of the funding point and the inclusion of the nodes'
  2629  		// decred keys in it were already checked by the router. At this
  2630  		// stage we should check that the node keys attest to the decred
  2631  		// keys by validating the signatures of the announcement. If the
  2632  		// proof is valid, then we'll populate the channel edge with it,
  2633  		// so we can announce it on peer connect.
  2634  		err = d.cfg.Router.AddProof(msg.ShortChannelID, &dbProof)
  2635  		if err != nil {
  2636  			err := fmt.Errorf("unable to add proof to the "+
  2637  				"channel chanID=%v: %v", msg.ChannelID, err)
  2638  			log.Error(err)
  2639  			nMsg.err <- err
  2640  			return nil, false
  2641  		}
  2642  
  2643  		err = d.cfg.WaitingProofStore.Remove(proof.OppositeKey())
  2644  		if err != nil {
  2645  			err := fmt.Errorf("unable to remove opposite proof "+
  2646  				"for the channel with chanID=%v: %v",
  2647  				msg.ChannelID, err)
  2648  			log.Error(err)
  2649  			nMsg.err <- err
  2650  			return nil, false
  2651  		}
  2652  
  2653  		// The proof was successfully created and now we can announce
  2654  		// the channel to the rest of the network.
  2655  		log.Infof("Fully valid channel proof for short_chan_id=%s "+
  2656  			"constructed, adding to next ann batch",
  2657  			shortChanID)
  2658  
  2659  		// Assemble the necessary announcements to add to the next
  2660  		// broadcasting batch.
  2661  		announcements = append(announcements, networkMsg{
  2662  			peer:   nMsg.peer,
  2663  			source: nMsg.source,
  2664  			msg:    chanAnn,
  2665  		})
  2666  		if src, err := chanInfo.NodeKey1(); err == nil && e1Ann != nil {
  2667  			announcements = append(announcements, networkMsg{
  2668  				peer:   nMsg.peer,
  2669  				source: src,
  2670  				msg:    e1Ann,
  2671  			})
  2672  		}
  2673  		if src, err := chanInfo.NodeKey2(); err == nil && e2Ann != nil {
  2674  			announcements = append(announcements, networkMsg{
  2675  				peer:   nMsg.peer,
  2676  				source: src,
  2677  				msg:    e2Ann,
  2678  			})
  2679  		}
  2680  
  2681  		// We'll also send along the node announcements for each channel
  2682  		// participant if we know of them. To ensure our node
  2683  		// announcement propagates to our channel counterparty, we'll
  2684  		// set the source for each announcement to the node it belongs
  2685  		// to, otherwise we won't send it since the source gets skipped.
  2686  		// This isn't necessary for channel updates and announcement
  2687  		// signatures since we send those directly to our channel
  2688  		// counterparty through the gossiper's reliable sender.
  2689  		node1Ann, err := d.fetchNodeAnn(chanInfo.NodeKey1Bytes)
  2690  		if err != nil {
  2691  			log.Debugf("Unable to fetch node announcement for "+
  2692  				"%x: %v", chanInfo.NodeKey1Bytes, err)
  2693  		} else {
  2694  			if nodeKey1, err := chanInfo.NodeKey1(); err == nil {
  2695  				announcements = append(announcements, networkMsg{
  2696  					peer:   nMsg.peer,
  2697  					source: nodeKey1,
  2698  					msg:    node1Ann,
  2699  				})
  2700  			}
  2701  		}
  2702  		node2Ann, err := d.fetchNodeAnn(chanInfo.NodeKey2Bytes)
  2703  		if err != nil {
  2704  			log.Debugf("Unable to fetch node announcement for "+
  2705  				"%x: %v", chanInfo.NodeKey2Bytes, err)
  2706  		} else {
  2707  			if nodeKey2, err := chanInfo.NodeKey2(); err == nil {
  2708  				announcements = append(announcements, networkMsg{
  2709  					peer:   nMsg.peer,
  2710  					source: nodeKey2,
  2711  					msg:    node2Ann,
  2712  				})
  2713  			}
  2714  		}
  2715  
  2716  		nMsg.err <- nil
  2717  		return announcements, true
  2718  
  2719  	default:
  2720  		err := errors.New("wrong type of the announcement")
  2721  		nMsg.err <- err
  2722  		return nil, false
  2723  	}
  2724  }
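
// NOTE (editor's sketch): non-keep-alive ChannelUpdates are rate limited in
// processNetworkAnnouncement with one token bucket per channel direction,
// built from golang.org/x/time/rate. Below is a minimal, hypothetical
// reproduction of that construction, assuming an interval and burst like the
// cfg.ChannelUpdateInterval and cfg.MaxChannelUpdateBurst values used above.
func exampleNewUpdateRateLimiters(interval time.Duration,
	burst int) [2]*rate.Limiter {

	// One limiter per direction: index 0 for node 1's updates, index 1
	// for node 2's updates.
	r := rate.Every(interval)
	return [2]*rate.Limiter{
		rate.NewLimiter(r, burst),
		rate.NewLimiter(r, burst),
	}
}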
  2725  
  2726  // processZombieUpdate determines whether the provided channel update should
  2727  // resurrect a given zombie edge.
  2728  func (d *AuthenticatedGossiper) processZombieUpdate(
  2729  	chanInfo *channeldb.ChannelEdgeInfo, msg *lnwire.ChannelUpdate) error {
  2730  
  2731  	// The least-significant bit in the flag on the channel update tells us
  2732  	// which edge is being updated.
  2733  	isNode1 := msg.ChannelFlags&lnwire.ChanUpdateDirection == 0
  2734  
  2735  	// Since we've deemed the update as not stale above, before marking it
  2736  	// live, we'll make sure it has been signed by the correct party. If we
  2737  	// have both pubkeys, either party can resurrect the channel. If we've
  2738  	// already marked this with the stricter, single-sided resurrection we
  2739  	// will only have the pubkey of the node with the oldest timestamp.
  2740  	var pubKey *secp256k1.PublicKey
  2741  	switch {
  2742  	case isNode1 && chanInfo.NodeKey1Bytes != emptyPubkey:
  2743  		pubKey, _ = chanInfo.NodeKey1()
  2744  	case !isNode1 && chanInfo.NodeKey2Bytes != emptyPubkey:
  2745  		pubKey, _ = chanInfo.NodeKey2()
  2746  	}
  2747  	if pubKey == nil {
  2748  		return fmt.Errorf("incorrect pubkey to resurrect zombie "+
  2749  			"with chan_id=%v", msg.ShortChannelID)
  2750  	}
  2751  
  2752  	err := routing.VerifyChannelUpdateSignature(msg, pubKey)
  2753  	if err != nil {
  2754  		return fmt.Errorf("unable to verify channel "+
  2755  			"update signature: %v", err)
  2756  	}
  2757  
  2758  	// With the signature valid, we'll proceed to mark the
  2759  	// edge as live and wait for the channel announcement to
  2760  	// come through again.
  2761  	err = d.cfg.Router.MarkEdgeLive(msg.ShortChannelID)
  2762  	if err != nil {
  2763  		return fmt.Errorf("unable to remove edge with "+
  2764  			"chan_id=%v from zombie index: %v",
  2765  			msg.ShortChannelID, err)
  2766  	}
  2767  
  2768  	log.Debugf("Removed edge with chan_id=%v from zombie "+
  2769  		"index", msg.ShortChannelID)
  2770  
  2771  	return nil
  2772  }
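
// NOTE (editor's sketch): resurrecting a zombie edge requires a valid
// signature from the node the update's direction bit refers to, and only if
// we still have that node's pubkey on record (a zeroed key means the
// stricter, single-sided rule applies). A hypothetical standalone form of
// that key selection:
func exampleZombieSigner(isNode1 bool, key1, key2 [33]byte) ([33]byte, bool) {
	var zero [33]byte
	switch {
	case isNode1 && key1 != zero:
		return key1, true
	case !isNode1 && key2 != zero:
		return key2, true
	}

	// No usable pubkey, so the zombie cannot be resurrected by this
	// update.
	return zero, false
}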
  2773  
  2774  // fetchNodeAnn fetches the latest signed node announcement from our point of
  2775  // view for the node with the given public key.
  2776  func (d *AuthenticatedGossiper) fetchNodeAnn(
  2777  	pubKey [33]byte) (*lnwire.NodeAnnouncement, error) {
  2778  
  2779  	node, err := d.cfg.Router.FetchLightningNode(pubKey)
  2780  	if err != nil {
  2781  		return nil, err
  2782  	}
  2783  
  2784  	return node.NodeAnnouncement(true)
  2785  }
  2786  
  2787  // isMsgStale determines whether a message retrieved from the backing
  2788  // MessageStore is seen as stale by the current graph.
  2789  func (d *AuthenticatedGossiper) isMsgStale(msg lnwire.Message) bool {
  2790  	switch msg := msg.(type) {
  2791  	case *lnwire.AnnounceSignatures:
  2792  		chanInfo, _, _, err := d.cfg.Router.GetChannelByID(
  2793  			msg.ShortChannelID,
  2794  		)
  2795  
  2796  		// If the channel cannot be found, it is most likely a leftover
  2797  		// message for a channel that was closed, so we can consider it
  2798  		// stale.
  2799  		if err == channeldb.ErrEdgeNotFound {
  2800  			return true
  2801  		}
  2802  		if err != nil {
  2803  			log.Debugf("Unable to retrieve channel=%v from graph: "+
  2804  				"%v", msg.ShortChannelID, err)
  2805  			return false
  2806  		}
  2807  
  2808  		// If the proof exists in the graph, then we have successfully
  2809  		// received the remote proof and assembled the full proof, so we
  2810  		// can safely delete the local proof from the database.
  2811  		return chanInfo.AuthProof != nil
  2812  
  2813  	case *lnwire.ChannelUpdate:
  2814  		_, p1, p2, err := d.cfg.Router.GetChannelByID(msg.ShortChannelID)
  2815  
  2816  		// If the channel cannot be found, it is most likely a leftover
  2817  		// message for a channel that was closed, so we can consider it
  2818  		// stale.
  2819  		if err == channeldb.ErrEdgeNotFound {
  2820  			return true
  2821  		}
  2822  		if err != nil {
  2823  			log.Debugf("Unable to retrieve channel=%v from graph: "+
  2824  				"%v", msg.ShortChannelID, err)
  2825  			return false
  2826  		}
  2827  
  2828  		// Otherwise, we'll retrieve the correct policy that we
  2829  		// currently have stored within our graph to check if this
  2830  		// message is stale by comparing its timestamp.
  2831  		var p *channeldb.ChannelEdgePolicy
  2832  		if msg.ChannelFlags&lnwire.ChanUpdateDirection == 0 {
  2833  			p = p1
  2834  		} else {
  2835  			p = p2
  2836  		}
  2837  
  2838  		// If the policy is still unknown, then we can consider this
  2839  		// policy fresh.
  2840  		if p == nil {
  2841  			return false
  2842  		}
  2843  
  2844  		timestamp := time.Unix(int64(msg.Timestamp), 0)
  2845  		return p.LastUpdate.After(timestamp)
  2846  
  2847  	default:
  2848  		// We'll make sure to not mark any unsupported messages as stale
  2849  		// to ensure they are not removed.
  2850  		return false
  2851  	}
  2852  }
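
// The helper below is an illustrative sketch, not part of the original
// file: it captures the timestamp rule used above for ChannelUpdate
// messages, where an update is stale once the policy we already have was
// signed at a strictly later time. The function name is hypothetical.
func channelUpdateIsStale(upd *lnwire.ChannelUpdate,
	have *channeldb.ChannelEdgePolicy) bool {

	// With no policy on record, the update cannot be stale.
	if have == nil {
		return false
	}

	ts := time.Unix(int64(upd.Timestamp), 0)
	return have.LastUpdate.After(ts)
}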
  2853  
  2854  // updateChannel creates a new fully signed update for the channel, and updates
  2855  // the underlying graph with the new state.
  2856  func (d *AuthenticatedGossiper) updateChannel(info *channeldb.ChannelEdgeInfo,
  2857  	edge *channeldb.ChannelEdgePolicy) (*lnwire.ChannelAnnouncement,
  2858  	*lnwire.ChannelUpdate, error) {
  2859  
  2860  	// Parse the unsigned edge into a channel update.
  2861  	chanUpdate := netann.UnsignedChannelUpdateFromEdge(info, edge)
  2862  
  2863  	// We'll generate a new signature over a digest of the channel update
  2864  	// itself and bump its timestamp so the new update propagates.
  2865  	err := netann.SignChannelUpdate(
  2866  		d.cfg.AnnSigner, d.selfKeyLoc, chanUpdate,
  2867  		netann.ChanUpdSetTimestamp,
  2868  	)
  2869  	if err != nil {
  2870  		return nil, nil, err
  2871  	}
  2872  
  2873  	// Next, we'll set the new timestamp and signature in place on the
  2874  	// edge policy.
  2875  	edge.LastUpdate = time.Unix(int64(chanUpdate.Timestamp), 0)
  2876  	edge.SigBytes = chanUpdate.Signature.ToSignatureBytes()
  2877  
  2878  	// To ensure that our signature is valid, we'll verify it ourselves
  2879  	// before committing the updated policy and returning the update.
  2880  	err = routing.ValidateChannelUpdateAnn(d.selfKey, info.Capacity, chanUpdate)
  2881  	if err != nil {
  2882  		return nil, nil, fmt.Errorf("generated invalid channel "+
  2883  			"update sig: %v", err)
  2884  	}
  2885  
  2886  	// Finally, we'll write the new edge policy to disk.
  2887  	if err := d.cfg.Router.UpdateEdge(edge); err != nil {
  2888  		return nil, nil, err
  2889  	}
  2890  
  2891  	log.Infof("Created ChannelUpdate for channel %s with update time %v",
  2892  		chanUpdate.ShortChannelID, edge.LastUpdate)
  2893  
  2894  	// We'll also create the original channel announcement so the two can
  2895  	// be broadcast alongside each other (if necessary), but only if we
  2896  	// have a full channel announcement for this channel.
  2897  	var chanAnn *lnwire.ChannelAnnouncement
  2898  	if info.AuthProof != nil {
  2899  		chanID := lnwire.NewShortChanIDFromInt(info.ChannelID)
  2900  		chanAnn = &lnwire.ChannelAnnouncement{
  2901  			ShortChannelID:  chanID,
  2902  			NodeID1:         info.NodeKey1Bytes,
  2903  			NodeID2:         info.NodeKey2Bytes,
  2904  			ChainHash:       info.ChainHash,
  2905  			DecredKey1:      info.DecredKey1Bytes,
  2906  			Features:        lnwire.NewRawFeatureVector(),
  2907  			DecredKey2:      info.DecredKey2Bytes,
  2908  			ExtraOpaqueData: edge.ExtraOpaqueData,
  2909  		}
  2910  		chanAnn.NodeSig1, err = lnwire.NewSigFromRawSignature(
  2911  			info.AuthProof.NodeSig1Bytes,
  2912  		)
  2913  		if err != nil {
  2914  			return nil, nil, err
  2915  		}
  2916  		chanAnn.NodeSig2, err = lnwire.NewSigFromRawSignature(
  2917  			info.AuthProof.NodeSig2Bytes,
  2918  		)
  2919  		if err != nil {
  2920  			return nil, nil, err
  2921  		}
  2922  		chanAnn.DecredSig1, err = lnwire.NewSigFromRawSignature(
  2923  			info.AuthProof.DecredSig1Bytes,
  2924  		)
  2925  		if err != nil {
  2926  			return nil, nil, err
  2927  		}
  2928  		chanAnn.DecredSig2, err = lnwire.NewSigFromRawSignature(
  2929  			info.AuthProof.DecredSig2Bytes,
  2930  		)
  2931  		if err != nil {
  2932  			return nil, nil, err
  2933  		}
  2934  	}
  2935  
  2936  	return chanAnn, chanUpdate, err
  2937  }
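
// The helper below is an illustrative sketch, not part of the original
// file: it isolates the conversion of the four stored auth-proof signatures
// into wire signatures, as done above when rebuilding the channel
// announcement. It assumes the proof is the channeldb.ChannelAuthProof
// type carrying the raw signature fields referenced above; the function
// name is hypothetical.
func announcementSigs(proof *channeldb.ChannelAuthProof) (nodeSig1, nodeSig2,
	decredSig1, decredSig2 lnwire.Sig, err error) {

	if nodeSig1, err = lnwire.NewSigFromRawSignature(proof.NodeSig1Bytes); err != nil {
		return
	}
	if nodeSig2, err = lnwire.NewSigFromRawSignature(proof.NodeSig2Bytes); err != nil {
		return
	}
	if decredSig1, err = lnwire.NewSigFromRawSignature(proof.DecredSig1Bytes); err != nil {
		return
	}
	decredSig2, err = lnwire.NewSigFromRawSignature(proof.DecredSig2Bytes)
	return
}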
  2938  
  2939  // SyncManager returns the gossiper's SyncManager instance.
  2940  func (d *AuthenticatedGossiper) SyncManager() *SyncManager {
  2941  	return d.syncMgr
  2942  }
  2943  
  2944  // IsKeepAliveUpdate determines whether this channel update is considered a
  2945  // keep-alive update based on the previous channel update processed for the same
  2946  // direction.
  2947  func IsKeepAliveUpdate(update *lnwire.ChannelUpdate,
  2948  	prev *channeldb.ChannelEdgePolicy) bool {
  2949  
  2950  	// Both updates should be from the same direction.
  2951  	if update.ChannelFlags&lnwire.ChanUpdateDirection !=
  2952  		prev.ChannelFlags&lnwire.ChanUpdateDirection {
  2953  		return false
  2954  	}
  2955  
  2956  	// The timestamp should always increase for a keep-alive update.
  2957  	timestamp := time.Unix(int64(update.Timestamp), 0)
  2958  	if !timestamp.After(prev.LastUpdate) {
  2959  		return false
  2960  	}
  2961  
  2962  	// None of the remaining fields should change for a keep-alive update.
  2963  	if update.ChannelFlags.IsDisabled() != prev.ChannelFlags.IsDisabled() {
  2964  		return false
  2965  	}
  2966  	if lnwire.MilliAtom(update.BaseFee) != prev.FeeBaseMAtoms {
  2967  		return false
  2968  	}
  2969  	if lnwire.MilliAtom(update.FeeRate) != prev.FeeProportionalMillionths {
  2970  		return false
  2971  	}
  2972  	if update.TimeLockDelta != prev.TimeLockDelta {
  2973  		return false
  2974  	}
  2975  	if update.HtlcMinimumMAtoms != prev.MinHTLC {
  2976  		return false
  2977  	}
  2978  	if update.MessageFlags.HasMaxHtlc() && !prev.MessageFlags.HasMaxHtlc() {
  2979  		return false
  2980  	}
  2981  	if update.HtlcMaximumMAtoms != prev.MaxHTLC {
  2982  		return false
  2983  	}
  2984  	if !bytes.Equal(update.ExtraOpaqueData, prev.ExtraOpaqueData) {
  2985  		return false
  2986  	}
  2987  	return true
  2988  }
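
// Hypothetical usage sketch (not part of the original file): a caller that
// already holds the previously stored policy can use IsKeepAliveUpdate to
// treat pure keep-alive updates less urgently than real policy changes.
// The prevPolicy and upd variables below are assumed.
//
//	if prevPolicy != nil && IsKeepAliveUpdate(upd, prevPolicy) {
//		// Only the timestamp moved forward; this update can be
//		// deprioritized or rate limited more aggressively.
//	}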
  2989  
  2990  // latestHeight returns the gossiper's latest height known of the chain.
  2991  func (d *AuthenticatedGossiper) latestHeight() uint32 {
  2992  	d.Lock()
  2993  	defer d.Unlock()
  2994  	return d.bestHeight
  2995  }