github.com/decred/dcrlnd@v0.7.6/peer/brontide.go

     1  package peer
     2  
     3  import (
     4  	"bytes"
     5  	"container/list"
     6  	"errors"
     7  	"fmt"
     8  	"net"
     9  	"sync"
    10  	"sync/atomic"
    11  	"time"
    12  
    13  	"github.com/davecgh/go-spew/spew"
    14  	"github.com/decred/dcrd/chaincfg/chainhash"
    15  	"github.com/decred/dcrd/chaincfg/v3"
    16  	"github.com/decred/dcrd/connmgr"
    17  	"github.com/decred/dcrd/dcrec/secp256k1/v4"
    18  	"github.com/decred/dcrd/wire"
    19  
    20  	"github.com/decred/dcrlnd/buffer"
    21  	"github.com/decred/dcrlnd/chainntnfs"
    22  	"github.com/decred/dcrlnd/channeldb"
    23  	"github.com/decred/dcrlnd/channelnotifier"
    24  	"github.com/decred/dcrlnd/contractcourt"
    25  	"github.com/decred/dcrlnd/discovery"
    26  	"github.com/decred/dcrlnd/feature"
    27  	"github.com/decred/dcrlnd/funding"
    28  	"github.com/decred/dcrlnd/htlcswitch"
    29  	"github.com/decred/dcrlnd/htlcswitch/hodl"
    30  	"github.com/decred/dcrlnd/htlcswitch/hop"
    31  	"github.com/decred/dcrlnd/input"
    32  	"github.com/decred/dcrlnd/invoices"
    33  	"github.com/decred/dcrlnd/lnpeer"
    34  	"github.com/decred/dcrlnd/lnwallet"
    35  	"github.com/decred/dcrlnd/lnwallet/chainfee"
    36  	"github.com/decred/dcrlnd/lnwallet/chancloser"
    37  	"github.com/decred/dcrlnd/lnwire"
    38  	"github.com/decred/dcrlnd/netann"
    39  	"github.com/decred/dcrlnd/pool"
    40  	"github.com/decred/dcrlnd/queue"
    41  	"github.com/decred/dcrlnd/ticker"
    42  	"github.com/decred/dcrlnd/watchtower/wtclient"
    43  )
    44  
    45  const (
    46  	// pingInterval is the interval at which ping messages are sent.
    47  	pingInterval = 1 * time.Minute
    48  
    49  	// idleTimeout is the duration of inactivity before we time out a peer.
    50  	idleTimeout = pingInterval + pingInterval/2
    51  
    52  	// sendStallTimeout is the interval such that, if we fail to send any
    53  	// messages before it expires, we preemptively close the connection to
    54  	// the remote peer. This is set to a value slightly below idleTimeout
    55  	// so that we avoid sending messages just before the remote peer would
    56  	// disconnect from us.
    57  	sendStallTimeout = idleTimeout - 5*time.Second
    58  
    59  	// writeMessageTimeout is the timeout used when writing a message to the
    60  	// peer.
    61  	writeMessageTimeout = 5 * time.Second
    62  
    63  	// readMessageTimeout is the timeout used when reading a message from a
    64  	// peer.
    65  	readMessageTimeout = 5 * time.Second
    66  
    67  	// handshakeTimeout is the timeout used when waiting for the peer's init
    68  	// message.
    69  	handshakeTimeout = 15 * time.Second
    70  
    71  	// ErrorBufferSize is the number of historic peer errors that we store.
    72  	ErrorBufferSize = 10
    73  )
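
// With the defaults above, idleTimeout works out to 90 seconds and
// sendStallTimeout to 85 seconds, while pings are sent every 60 seconds.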
    74  
    75  var (
    76  	// ErrChannelNotFound is an error returned when a channel is queried and
    77  	// either the Brontide doesn't know of it, or the channel in question
    78  	// is pending.
    79  	ErrChannelNotFound = fmt.Errorf("channel not found")
    80  )
    81  
    82  // outgoingMsg packages an lnwire.Message to be sent out on the wire, along with
    83  // a buffered channel which will be sent upon once the write is complete. This
    84  // buffered channel acts as a semaphore to be used for synchronization purposes.
    85  type outgoingMsg struct {
    86  	priority bool
    87  	msg      lnwire.Message
    88  	errChan  chan error // MUST be buffered.
    89  }
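
// A hypothetical caller-side sketch of the errChan semaphore: the writer
// goroutine sends on errChan once the write attempt completes, which is why
// the channel must be buffered (the writer must never block on a caller that
// has already gone away). Here, msg stands for whatever lnwire.Message the
// caller wants delivered.
//
//	errChan := make(chan error, 1)
//	p.queueMsg(msg, errChan)
//	select {
//	case err := <-errChan:
//		// The write completed; err reports whether it succeeded.
//		_ = err
//	case <-p.quit:
//		// The peer is shutting down before the write finished.
//	}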
    90  
    91  // newChannelMsg packages a channeldb.OpenChannel with a channel that allows
    92  // the receiver of the request to report when the channel creation process has
    93  // completed.
    94  type newChannelMsg struct {
    95  	channel *channeldb.OpenChannel
    96  	err     chan error
    97  }
    98  
    99  type customMsg struct {
   100  	peer [33]byte
   101  	msg  lnwire.Custom
   102  }
   103  
   104  // closeMsg is a wrapper struct around any wire messages that deal with the
   105  // cooperative channel closure negotiation process. This struct includes the
   106  // raw channel ID targeted along with the original message.
   107  type closeMsg struct {
   108  	cid lnwire.ChannelID
   109  	msg lnwire.Message
   110  }
   111  
   112  // PendingUpdate describes the pending state of a closing channel.
   113  type PendingUpdate struct {
   114  	Txid        []byte
   115  	OutputIndex uint32
   116  }
   117  
   118  // ChannelCloseUpdate contains the outcome of the close channel operation.
   119  type ChannelCloseUpdate struct {
   120  	ClosingTxid []byte
   121  	Success     bool
   122  }
   123  
   124  // TimestampedError is a timestamped error that is used to store the most recent
   125  // errors we have experienced with our peers.
   126  type TimestampedError struct {
   127  	Error     error
   128  	Timestamp time.Time
   129  }
   130  
   131  // Config defines configuration fields that are necessary for a peer object
   132  // to function.
   133  type Config struct {
   134  	// Conn is the underlying network connection for this peer.
   135  	Conn MessageConn
   136  
   137  	// ConnReq stores information related to the persistent connection request
   138  	// for this peer.
   139  	ConnReq *connmgr.ConnReq
   140  
   141  	// PubKeyBytes is the serialized, compressed public key of this peer.
   142  	PubKeyBytes [33]byte
   143  
   144  	// Addr is the network address of the peer.
   145  	Addr *lnwire.NetAddress
   146  
   147  	// Inbound indicates whether or not the peer is an inbound peer.
   148  	Inbound bool
   149  
   150  	// Features is the set of features that we advertise to the remote party.
   151  	Features *lnwire.FeatureVector
   152  
   153  	// LegacyFeatures is the set of features that we advertise to the remote
   154  	// peer for backwards compatibility. Nodes that have not implemented
   155  	// flat features will still be able to read our feature bits from the
   156  	// legacy global field, but we will also advertise everything in the
   157  	// default features field.
   158  	LegacyFeatures *lnwire.FeatureVector
   159  
   160  	// OutgoingCltvRejectDelta defines the number of blocks before expiry of
   161  	// an htlc where we don't offer it anymore.
   162  	OutgoingCltvRejectDelta uint32
   163  
   164  	// ChanActiveTimeout specifies the duration the peer will wait to request
   165  	// a channel reenable, beginning from the time the peer was started.
   166  	ChanActiveTimeout time.Duration
   167  
   168  	// ErrorBuffer stores a set of errors related to a peer. It contains error
   169  	// messages that our peer has recently sent us over the wire and records of
   170  	// unknown messages that were sent to us so that we can have a full track
   171  	// record of the communication errors we have had with our peer. If we
   172  	// choose to disconnect from a peer, it also stores the reason we had for
   173  	// disconnecting.
   174  	ErrorBuffer *queue.CircularBuffer
   175  
   176  	// WritePool is the task pool that manages reuse of write buffers. Write
   177  	// tasks are submitted to the pool in order to conserve the total number of
   178  	// write buffers allocated at any one time, and decouple write buffer
   179  	// allocation from the peer life cycle.
   180  	WritePool *pool.Write
   181  
   182  	// ReadPool is the task pool that manages reuse of read buffers.
   183  	ReadPool *pool.Read
   184  
   185  	// Switch is a pointer to the htlcswitch. It is used to set up, get,
   186  	// and tear down ChannelLinks.
   187  	Switch messageSwitch
   188  
   189  	// InterceptSwitch is a pointer to the InterceptableSwitch, a wrapper around
   190  	// the regular Switch. We only export it here to pass ForwardPackets to the
   191  	// ChannelLinkConfig.
   192  	InterceptSwitch *htlcswitch.InterceptableSwitch
   193  
   194  	// ChannelDB is used to fetch opened channels, and closed channels.
   195  	ChannelDB *channeldb.ChannelStateDB
   196  
   197  	// ChannelGraph is a pointer to the channel graph which is used to
   198  	// query information about the set of known active channels.
   199  	ChannelGraph *channeldb.ChannelGraph
   200  
   201  	// ChainArb is used to subscribe to channel events, update contract signals,
   202  	// and force close channels.
   203  	ChainArb *contractcourt.ChainArbitrator
   204  
   205  	// AuthGossiper is needed so that the Brontide impl can register with the
   206  	// gossiper and process remote channel announcements.
   207  	AuthGossiper *discovery.AuthenticatedGossiper
   208  
   209  	// ChanStatusMgr is used to set or un-set the disabled bit in channel
   210  	// updates.
   211  	ChanStatusMgr *netann.ChanStatusManager
   212  
   213  	// ChainIO is used to retrieve the best block.
   214  	ChainIO lnwallet.BlockChainIO
   215  
   216  	// FeeEstimator is used to compute our target ideal fee rate when
   217  	// initializing the coop close process.
   218  	FeeEstimator chainfee.Estimator
   219  
   220  	// Signer is used when creating *lnwallet.LightningChannel instances.
   221  	Signer input.Signer
   222  
   223  	// SigPool is used when creating *lnwallet.LightningChannel instances.
   224  	SigPool *lnwallet.SigPool
   225  
   226  	// Wallet is used to publish transactions and to generate delivery
   227  	// scripts during the coop close process.
   228  	Wallet *lnwallet.LightningWallet
   229  
   230  	// ChainNotifier is used to receive confirmations of a coop close
   231  	// transaction.
   232  	ChainNotifier chainntnfs.ChainNotifier
   233  
   234  	// RoutingPolicy is used to set the forwarding policy for links created by
   235  	// the Brontide.
   236  	RoutingPolicy htlcswitch.ForwardingPolicy
   237  
   238  	// Sphinx is used when setting up ChannelLinks so they can decode sphinx
   239  	// onion blobs.
   240  	Sphinx *hop.OnionProcessor
   241  
   242  	// WitnessBeacon is used when setting up ChannelLinks so they can add any
   243  	// preimages that they learn.
   244  	WitnessBeacon contractcourt.WitnessBeacon
   245  
   246  	// Invoices is passed to the ChannelLink on creation and handles all
   247  	// invoice-related logic.
   248  	Invoices *invoices.InvoiceRegistry
   249  
   250  	// ChannelNotifier is used by the link to notify other sub-systems about
   251  	// channel-related events and by the Brontide to subscribe to
   252  	// ActiveLinkEvents.
   253  	ChannelNotifier *channelnotifier.ChannelNotifier
   254  
   255  	// HtlcNotifier is used when creating a ChannelLink.
   256  	HtlcNotifier *htlcswitch.HtlcNotifier
   257  
   258  	// TowerClient is used by legacy channels to backup revoked states.
   259  	TowerClient wtclient.Client
   260  
   261  	// AnchorTowerClient is used by anchor channels to backup revoked
   262  	// states.
   263  	AnchorTowerClient wtclient.Client
   264  
   265  	// DisconnectPeer is used to disconnect this peer if the cooperative close
   266  	// process fails.
   267  	DisconnectPeer func(*secp256k1.PublicKey) error
   268  
   269  	// GenNodeAnnouncement is used to send our node announcement to the remote
   270  	// on startup.
   271  	GenNodeAnnouncement func(bool,
   272  		...netann.NodeAnnModifier) (lnwire.NodeAnnouncement, error)
   273  
   274  	// PrunePersistentPeerConnection is used to remove all internal state
   275  	// related to this peer in the server.
   276  	PrunePersistentPeerConnection func([33]byte)
   277  
   278  	// FetchLastChanUpdate fetches our latest channel update for a target
   279  	// channel.
   280  	FetchLastChanUpdate func(lnwire.ShortChannelID) (*lnwire.ChannelUpdate,
   281  		error)
   282  
   283  	// FundingManager is an implementation of the funding.Controller interface.
   284  	FundingManager funding.Controller
   285  
   286  	// Hodl is used when creating ChannelLinks to specify HodlFlags as
   287  	// breakpoints in dev builds.
   288  	Hodl *hodl.Config
   289  
   290  	// UnsafeReplay is used when creating ChannelLinks to specify whether or
   291  	// not to replay adds on its commitment tx.
   292  	UnsafeReplay bool
   293  
   294  	// MaxOutgoingCltvExpiry is used when creating ChannelLinks and is the max
   295  	// number of blocks that funds could be locked up for when forwarding
   296  	// payments.
   297  	MaxOutgoingCltvExpiry uint32
   298  
   299  	// MaxChannelFeeAllocation is used when creating ChannelLinks and is the
   300  	// maximum percentage of total funds that can be allocated to a channel's
   301  	// commitment fee. This only applies for the initiator of the channel.
   302  	MaxChannelFeeAllocation float64
   303  
   304  	// MaxAnchorsCommitFeeRate is the maximum fee rate we'll use as an
   305  	// initiator for anchor channel commitments.
   306  	MaxAnchorsCommitFeeRate chainfee.AtomPerKByte
   307  
   308  	// CoopCloseTargetConfs is the confirmation target that will be used
   309  	// to estimate the fee rate to use during a cooperative channel
   310  	// closure initiated by the remote peer.
   311  	CoopCloseTargetConfs uint32
   312  
   313  	// ServerPubKey is the serialized, compressed public key of our lnd node.
   314  	// It is used to determine which policy (channel edge) to pass to the
   315  	// ChannelLink.
   316  	ServerPubKey [33]byte
   317  
   318  	// ChannelCommitInterval is the maximum time that is allowed to pass between
   319  	// receiving a channel state update and signing the next commitment.
   320  	// Setting this to a longer duration allows for more efficient channel
   321  	// operations at the cost of latency.
   322  	ChannelCommitInterval time.Duration
   323  
   324  	// ChannelCommitBatchSize is the maximum number of channel state updates
   325  	// that is accumulated before signing a new commitment.
   326  	ChannelCommitBatchSize uint32
   327  
   328  	// HandleCustomMessage is called whenever a custom message is received
   329  	// from the peer.
   330  	HandleCustomMessage func(peer [33]byte, msg *lnwire.Custom) error
   331  
   332  	// PongBuf is a slice we'll reuse instead of allocating memory on the
   333  	// heap. Since only reads will occur and no writes, there is no need
   334  	// for any synchronization primitives. As a result, it's safe to share
   335  	// this across multiple Peer struct instances.
   336  	PongBuf []byte
   337  
   338  	// Quit is the server's quit channel. If this is closed, we halt operation.
   339  	Quit chan struct{}
   340  
   341  	// ChainParams is the network this peer is connected to.
   342  	ChainParams *chaincfg.Params
   343  }
   344  
   345  // Brontide is an active peer on the Lightning Network. This struct is responsible
   346  // for managing any channel state related to this peer. To do so, it has
   347  // several helper goroutines to handle events such as HTLC timeouts, new
   348  // funding workflow, and detecting an uncooperative closure of any active
   349  // channels.
   350  // TODO(roasbeef): proper reconnection logic
   351  type Brontide struct {
   352  	// MUST be used atomically.
   353  	started    int32
   354  	disconnect int32
   355  
   356  	// MUST be used atomically.
   357  	bytesReceived uint64
   358  	bytesSent     uint64
   359  
   360  	// The following fields are used for handling forced ping requests done
   361  	// by EnforcePing().
   362  	enforcePingMtx   sync.Mutex
   363  	enforcePingChan  chan struct{}
   364  	enforcePongChans []chan time.Duration
   365  
   366  	// pingTime is a rough estimate of the RTT (round-trip-time) between us
   367  	// and the connected peer. This time is expressed in microseconds.
   368  	// To be used atomically.
   369  	// TODO(roasbeef): also use a WMA or EMA?
   370  	pingTime int64
   371  
   372  	// pingLastSend is the Unix time expressed in nanoseconds when we sent
   373  	// our last ping message. To be used atomically.
   374  	pingLastSend int64
   375  
   376  	// lastPingPayload stores an unsafe pointer wrapped as an atomic
   377  	// variable which points to the last payload the remote party sent us
   378  	// as their ping.
   379  	//
   380  	// MUST be used atomically.
   381  	lastPingPayload atomic.Value
   382  
   383  	cfg Config
   384  
   385  	// activeSignal when closed signals that the peer is now active and
   386  	// ready to process messages.
   387  	activeSignal chan struct{}
   388  
   389  	// startTime is the time this peer connection was successfully established.
   390  	// It will be zero for peers that did not successfully call Start().
   391  	startTime time.Time
   392  
   393  	// sendQueue is the channel which is used to queue outgoing messages to be
   394  	// written onto the wire. Note that this channel is unbuffered.
   395  	sendQueue chan outgoingMsg
   396  
   397  	// outgoingQueue is a buffered channel which allows second/third party
   398  	// objects to queue messages to be sent out on the wire.
   399  	outgoingQueue chan outgoingMsg
   400  
   401  	// activeChanMtx protects access to the activeChannels and
   402  	// addedChannels maps.
   403  	activeChanMtx sync.RWMutex
   404  
   405  	// activeChannels is a map which stores the state machines of all
   406  	// active channels. Channels are indexed into the map by their channel
   407  	// ID, which is derived from the funding outpoint of the channel.
   408  	//
   409  	// NOTE: On startup, pending channels are stored as nil in this map.
   410  	// Confirmed channels have channel data populated in the map. This means
   411  	// that accesses to this map should nil-check the LightningChannel to
   412  	// see if this is a pending channel or not. The tradeoff here is either
   413  	// having two maps everywhere (one for pending, one for confirmed chans)
   414  	// or having an extra nil-check per access.
   415  	activeChannels map[lnwire.ChannelID]*lnwallet.LightningChannel
   416  
   417  	// addedChannels tracks any new channels opened during this peer's
   418  	// lifecycle. We use this to filter out these new channels when the time
   419  	// comes to request a reenable for active channels, since they will have
   420  	// waited a shorter duration.
   421  	addedChannels map[lnwire.ChannelID]struct{}
   422  
   423  	// newChannels is used by the fundingManager to send fully opened
   424  	// channels to the source peer which handled the funding workflow.
   425  	newChannels chan *newChannelMsg
   426  
   427  	// activeMsgStreams is a map from channel id to the channel streams that
   428  	// proxy messages to individual, active links.
   429  	activeMsgStreams map[lnwire.ChannelID]*msgStream
   430  
   431  	// activeChanCloses is a map that keeps track of all the active
   432  	// cooperative channel closures. Any channel closing messages are directed
   433  	// to one of these active state machines. Once the channel has been closed,
   434  	// the state machine will be deleted from the map.
   435  	activeChanCloses map[lnwire.ChannelID]*chancloser.ChanCloser
   436  
   437  	// localCloseChanReqs is a channel over which any local requests to
   438  	// close a particular channel are sent.
   439  	localCloseChanReqs chan *htlcswitch.ChanClose
   440  
   441  	// linkFailures receives all reported channel failures from the switch,
   442  	// and instructs the channelManager to clean up remaining channel state.
   443  	linkFailures chan linkFailureReport
   444  
   445  	// chanCloseMsgs is a channel over which any messages related to
   446  	// channel closures are sent. This includes lnwire.Shutdown messages
   447  	// as well as lnwire.ClosingSigned messages.
   448  	chanCloseMsgs chan *closeMsg
   449  
   450  	// remoteFeatures is the feature vector received from the peer during
   451  	// the connection handshake.
   452  	remoteFeatures *lnwire.FeatureVector
   453  
   454  	// resentChanSyncMsg is a set that keeps track of which channels we
   455  	// have re-sent channel reestablishment messages for. This is done to
   456  	// avoid getting into a loop where both peers keep responding to the
   457  	// other peer's chansync message with their own, over and over again.
   458  	resentChanSyncMsg map[lnwire.ChannelID]struct{}
   459  
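	// queueQuit and quit coordinate shutdown of the peer's helper
	// goroutines (quit is closed by Disconnect), while wg tracks those
	// goroutines so that WaitForDisconnect can block until they have all
	// exited.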
   460  	queueQuit chan struct{}
   461  	quit      chan struct{}
   462  	wg        sync.WaitGroup
   463  }
   464  
   465  // A compile-time check to ensure that Brontide satisfies the lnpeer.Peer interface.
   466  var _ lnpeer.Peer = (*Brontide)(nil)
   467  
   468  // NewBrontide creates a new Brontide from a peer.Config struct.
   469  func NewBrontide(cfg Config) *Brontide {
   470  	p := &Brontide{
   471  		cfg:            cfg,
   472  		activeSignal:   make(chan struct{}),
   473  		sendQueue:      make(chan outgoingMsg),
   474  		outgoingQueue:  make(chan outgoingMsg),
   475  		addedChannels:  make(map[lnwire.ChannelID]struct{}),
   476  		activeChannels: make(map[lnwire.ChannelID]*lnwallet.LightningChannel),
   477  		newChannels:    make(chan *newChannelMsg, 1),
   478  
   479  		enforcePingChan:    make(chan struct{}),
   480  		activeMsgStreams:   make(map[lnwire.ChannelID]*msgStream),
   481  		activeChanCloses:   make(map[lnwire.ChannelID]*chancloser.ChanCloser),
   482  		localCloseChanReqs: make(chan *htlcswitch.ChanClose),
   483  		linkFailures:       make(chan linkFailureReport),
   484  		chanCloseMsgs:      make(chan *closeMsg),
   485  		resentChanSyncMsg:  make(map[lnwire.ChannelID]struct{}),
   486  		queueQuit:          make(chan struct{}),
   487  		quit:               make(chan struct{}),
   488  	}
   489  
   490  	return p
   491  }
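
// A minimal, hypothetical usage sketch (the real wiring lives in the
// server): construct the peer around an established connection, start it,
// and block until it disconnects. Per WaitForDisconnect's contract, the
// ready channel is only signalled once Start has returned without error.
//
//	p := NewBrontide(cfg) // cfg assembled by the caller
//	if err := p.Start(); err != nil {
//		return err
//	}
//	ready := make(chan struct{})
//	close(ready) // Start returned nil, so signal readiness.
//	p.WaitForDisconnect(ready)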
   492  
   493  // Start starts all helper goroutines the peer needs for normal operations.
   494  // If this peer has already been started, then this function is a noop.
   495  func (p *Brontide) Start() error {
   496  	if atomic.AddInt32(&p.started, 1) != 1 {
   497  		return nil
   498  	}
   499  
   500  	peerLog.Tracef("Peer %v starting with conn[%v->%v]", p,
   501  		p.cfg.Conn.LocalAddr(), p.cfg.Conn.RemoteAddr())
   502  
   503  	// Fetch and then load all the active channels we have with this remote
   504  	// peer from the database.
   505  	activeChans, err := p.cfg.ChannelDB.FetchOpenChannels(
   506  		p.cfg.Addr.IdentityKey,
   507  	)
   508  	if err != nil {
   509  		peerLog.Errorf("Unable to fetch active chans "+
   510  			"for peer %v: %v", p, err)
   511  		return err
   512  	}
   513  
   514  	if len(activeChans) == 0 {
   515  		p.cfg.PrunePersistentPeerConnection(p.cfg.PubKeyBytes)
   516  	}
   517  
   518  	// Quickly check if we have any existing legacy channels with this
   519  	// peer.
   520  	haveLegacyChan := false
   521  	for _, c := range activeChans {
   522  		if c.ChanType.IsTweakless() {
   523  			continue
   524  		}
   525  
   526  		haveLegacyChan = true
   527  		break
   528  	}
   529  
   530  	// Exchange local and global features; the init message must be the
   531  	// very first message exchanged between two nodes.
   532  	if err := p.sendInitMsg(haveLegacyChan); err != nil {
   533  		return fmt.Errorf("unable to send init msg: %v", err)
   534  	}
   535  
   536  	// Before we launch any of the helper goroutines off the peer struct,
   537  	// we'll first ensure proper adherence to the p2p protocol. The init
   538  	// message MUST be sent before any other message.
   539  	readErr := make(chan error, 1)
   540  	msgChan := make(chan lnwire.Message, 1)
   541  	p.wg.Add(1)
   542  	go func() {
   543  		defer p.wg.Done()
   544  
   545  		msg, err := p.readNextMessage()
   546  		if err != nil {
   547  			readErr <- err
   548  			msgChan <- nil
   549  			return
   550  		}
   551  		readErr <- nil
   552  		msgChan <- msg
   553  	}()
   554  
   555  	select {
   556  	// In order to avoid blocking indefinitely, we'll give the other peer
   557  	// an upper timeout to respond before we bail out early.
   558  	case <-time.After(handshakeTimeout):
   559  		return fmt.Errorf("peer did not complete handshake within %v",
   560  			handshakeTimeout)
   561  	case err := <-readErr:
   562  		if err != nil {
   563  			return fmt.Errorf("unable to read init msg: %v", err)
   564  		}
   565  	}
   566  
   567  	// Once the init message arrives, we can parse it so we can figure out
   568  	// the negotiation of features for this session.
   569  	msg := <-msgChan
   570  	if msg, ok := msg.(*lnwire.Init); ok {
   571  		if err := p.handleInitMsg(msg); err != nil {
   572  			p.storeError(err)
   573  			return err
   574  		}
   575  	} else {
   576  		return errors.New("very first message between nodes " +
   577  			"must be init message")
   578  	}
   579  
   580  	// Next, load all the active channels we have with this peer,
   581  	// registering them with the switch and launching the necessary
   582  	// goroutines required to operate them.
   583  	peerLog.Debugf("Loaded %v active channels from database with "+
   584  		"NodeKey(%x)", len(activeChans), p.PubKey())
   585  
   586  	msgs, err := p.loadActiveChannels(activeChans)
   587  	if err != nil {
   588  		return fmt.Errorf("unable to load channels: %v", err)
   589  	}
   590  
   591  	p.startTime = time.Now()
   592  
   593  	p.wg.Add(5)
   594  	go p.queueHandler()
   595  	go p.writeHandler()
   596  	go p.readHandler()
   597  	go p.channelManager()
   598  	go p.pingHandler()
   599  
   600  	// Signal to any external processes that the peer is now active.
   601  	close(p.activeSignal)
   602  
   603  	// Now that the peer has started up, we send any channel sync messages
   604  	// that must be resent for borked channels.
   605  	if len(msgs) > 0 {
   606  		peerLog.Infof("Sending %d channel sync messages to peer after "+
   607  			"loading active channels", len(msgs))
   608  		if err := p.SendMessage(true, msgs...); err != nil {
   609  			peerLog.Warnf("Failed sending channel sync "+
   610  				"messages to peer %v: %v", p, err)
   611  		}
   612  	}
   613  
   614  	// Node announcements don't propagate very well throughout the network
   615  	// as there isn't a way to efficiently query for them through their
   616  	// timestamp, mostly affecting nodes that were offline during the time
   617  	// of broadcast. We'll resend our node announcement to the remote peer
   618  	// as a best-effort delivery such that it can also propagate to their
   619  	// peers. To ensure they can successfully process it in most cases,
   620  	// we'll only resend it as long as we have at least one confirmed
   621  	// advertised channel with the remote peer.
   622  	//
   623  	// TODO(wilmer): Remove this once we're able to query for node
   624  	// announcements through their timestamps.
   625  	p.maybeSendNodeAnn(activeChans)
   626  
   627  	return nil
   628  }
   629  
   630  // initGossipSync initializes either a gossip syncer or an initial routing
   631  // dump, depending on the negotiated synchronization method.
   632  func (p *Brontide) initGossipSync() {
   633  
   634  	// If the remote peer knows of the new gossip queries feature, then
   635  	// we'll create a new gossipSyncer in the AuthenticatedGossiper for it.
   636  	if p.remoteFeatures.HasFeature(lnwire.GossipQueriesOptional) {
   637  		peerLog.Infof("Negotiated chan series queries with %x",
   638  			p.cfg.PubKeyBytes[:])
   639  
   640  		// Register the peer's gossip syncer with the gossiper.
   641  		// This blocks synchronously to ensure the gossip syncer is
   642  		// registered with the gossiper before attempting to read
   643  		// messages from the remote peer.
   644  		//
   645  		// TODO(wilmer): Only sync updates from non-channel peers. This
   646  		// requires an improved version of the current network
   647  		// bootstrapper to ensure we can find and connect to non-channel
   648  		// peers.
   649  		p.cfg.AuthGossiper.InitSyncState(p)
   650  	}
   651  }
   652  
   653  // QuitSignal is a method that should return a channel which will be sent upon
   654  // or closed once the backing peer exits. This allows callers using the
   655  // interface to cancel any processing in the event the backing implementation
   656  // exits.
   657  //
   658  // NOTE: Part of the lnpeer.Peer interface.
   659  func (p *Brontide) QuitSignal() <-chan struct{} {
   660  	return p.quit
   661  }
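
// A hypothetical caller-side sketch of QuitSignal: abandon any work tied to
// this peer as soon as it exits. workDone and handle are stand-ins for
// whatever the caller is actually waiting on.
//
//	select {
//	case res := <-workDone:
//		handle(res)
//	case <-p.QuitSignal():
//		return // The peer has exited; stop processing.
//	}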
   662  
   663  // loadActiveChannels creates indexes within the peer for tracking all active
   664  // channels returned by the database. It returns a slice of channel reestablish
   665  // messages that should be sent to the peer immediately, in case we have borked
   666  // channels that haven't been closed yet.
   667  func (p *Brontide) loadActiveChannels(chans []*channeldb.OpenChannel) (
   668  	[]lnwire.Message, error) {
   669  
   670  	// Return a slice of messages to send to the peer in case the channel
   671  	// cannot be loaded normally.
   672  	var msgs []lnwire.Message
   673  
   674  	for _, dbChan := range chans {
   675  		lnChan, err := lnwallet.NewLightningChannel(
   676  			p.cfg.Signer, dbChan, p.cfg.SigPool,
   677  			p.cfg.ChainParams,
   678  		)
   679  		if err != nil {
   680  			return nil, err
   681  		}
   682  
   683  		chanPoint := &dbChan.FundingOutpoint
   684  
   685  		chanID := lnwire.NewChanIDFromOutPoint(chanPoint)
   686  
   687  		peerLog.Infof("NodeKey(%x) loading ChannelPoint(%v)",
   688  			p.PubKey(), chanPoint)
   689  
   690  		// Skip adding any permanently irreconcilable channels to the
   691  		// htlcswitch.
   692  		if !dbChan.HasChanStatus(channeldb.ChanStatusDefault) &&
   693  			!dbChan.HasChanStatus(channeldb.ChanStatusRestored) {
   694  
   695  			peerLog.Warnf("ChannelPoint(%v) has status %v, won't "+
   696  				"start.", chanPoint, dbChan.ChanStatus())
   697  
   698  			// To help our peer recover from a potential data loss,
   699  			// we resend our channel reestablish message if the
   700  			// channel is in a borked state. We won't process any
   701  			// channel reestablish message sent from the peer, but
   702  			// that's okay since the assumption is that we did when
   703  			// marking the channel borked.
   704  			chanSync, err := dbChan.ChanSyncMsg()
   705  			if err != nil {
   706  				peerLog.Errorf("Unable to create channel "+
   707  					"reestablish message for channel %v: "+
   708  					"%v", chanPoint, err)
   709  				continue
   710  			}
   711  
   712  			msgs = append(msgs, chanSync)
   713  			continue
   714  		}
   715  
   716  		// Before we register this new link with the HTLC Switch, we'll
   717  		// need to fetch its current link-layer forwarding policy from
   718  		// the database.
   719  		graph := p.cfg.ChannelGraph
   720  		info, p1, p2, err := graph.FetchChannelEdgesByOutpoint(chanPoint)
   721  		if err != nil && err != channeldb.ErrEdgeNotFound {
   722  			return nil, err
   723  		}
   724  
   725  		// We'll filter out our policy from the directional channel
   726  		// edges based on whom the edge connects to. If it doesn't connect
   727  		// to us, then we know that we were the one that advertised the
   728  		// policy.
   729  		//
   730  		// TODO(roasbeef): can add helper method to get policy for
   731  		// particular channel.
   732  		var selfPolicy *channeldb.ChannelEdgePolicy
   733  		if info != nil && bytes.Equal(info.NodeKey1Bytes[:],
   734  			p.cfg.ServerPubKey[:]) {
   735  
   736  			selfPolicy = p1
   737  		} else {
   738  			selfPolicy = p2
   739  		}
   740  
   741  		// If we don't yet have an advertised routing policy, then
   742  		// we'll use the current default, otherwise we'll translate the
   743  		// routing policy into a forwarding policy.
   744  		var forwardingPolicy *htlcswitch.ForwardingPolicy
   745  		if selfPolicy != nil {
   746  			forwardingPolicy = &htlcswitch.ForwardingPolicy{
   747  				MinHTLCOut:    selfPolicy.MinHTLC,
   748  				MaxHTLC:       selfPolicy.MaxHTLC,
   749  				BaseFee:       selfPolicy.FeeBaseMAtoms,
   750  				FeeRate:       selfPolicy.FeeProportionalMillionths,
   751  				TimeLockDelta: uint32(selfPolicy.TimeLockDelta),
   752  			}
   753  		} else {
   754  			peerLog.Warnf("Unable to find our forwarding policy "+
   755  				"for channel %v, using default values",
   756  				chanPoint)
   757  			forwardingPolicy = &p.cfg.RoutingPolicy
   758  		}
   759  
   760  		peerLog.Tracef("Using link policy of: %v",
   761  			spew.Sdump(forwardingPolicy))
   762  
   763  		// If the channel is pending, set the value to nil in the
   764  		// activeChannels map. This is done to signify that the channel is
   765  		// pending. We don't add the link to the switch here - it's the funding
   766  		// manager's responsibility to spin up pending channels. Adding them
   767  		// here would just be extra work as we'll tear them down when creating
   768  		// + adding the final link.
   769  		if lnChan.IsPending() {
   770  			p.activeChanMtx.Lock()
   771  			p.activeChannels[chanID] = nil
   772  			p.activeChanMtx.Unlock()
   773  
   774  			continue
   775  		}
   776  
   777  		// Subscribe to the set of on-chain events for this channel.
   778  		chainEvents, err := p.cfg.ChainArb.SubscribeChannelEvents(
   779  			*chanPoint,
   780  		)
   781  		if err != nil {
   782  			return nil, err
   783  		}
   784  
   785  		err = p.addLink(
   786  			chanPoint, lnChan, forwardingPolicy, chainEvents,
   787  			true,
   788  		)
   789  		if err != nil {
   790  			return nil, fmt.Errorf("unable to add link %v to "+
   791  				"switch: %v", chanPoint, err)
   792  		}
   793  
   794  		p.activeChanMtx.Lock()
   795  		p.activeChannels[chanID] = lnChan
   796  		p.activeChanMtx.Unlock()
   797  	}
   798  
   799  	return msgs, nil
   800  }
   801  
   802  // addLink creates and adds a new ChannelLink from the specified channel.
   803  func (p *Brontide) addLink(chanPoint *wire.OutPoint,
   804  	lnChan *lnwallet.LightningChannel,
   805  	forwardingPolicy *htlcswitch.ForwardingPolicy,
   806  	chainEvents *contractcourt.ChainEventSubscription,
   807  	syncStates bool) error {
   808  
   809  	// onChannelFailure will be called by the link in case the channel
   810  	// fails for some reason.
   811  	onChannelFailure := func(chanID lnwire.ChannelID,
   812  		shortChanID lnwire.ShortChannelID,
   813  		linkErr htlcswitch.LinkFailureError) {
   814  
   815  		failure := linkFailureReport{
   816  			chanPoint:   *chanPoint,
   817  			chanID:      chanID,
   818  			shortChanID: shortChanID,
   819  			linkErr:     linkErr,
   820  		}
   821  
   822  		select {
   823  		case p.linkFailures <- failure:
   824  		case <-p.quit:
   825  		case <-p.cfg.Quit:
   826  		}
   827  	}
   828  
   829  	updateContractSignals := func(signals *contractcourt.ContractSignals) error {
   830  		return p.cfg.ChainArb.UpdateContractSignals(*chanPoint, signals)
   831  	}
   832  
   833  	chanType := lnChan.State().ChanType
   834  
   835  	// Select the appropriate tower client based on the channel type. It's
   836  	// okay if the clients are disabled altogether and these values are nil,
   837  	// as the link will check for nilness before using either.
   838  	var towerClient htlcswitch.TowerClient
   839  	if chanType.HasAnchors() {
   840  		towerClient = p.cfg.AnchorTowerClient
   841  	} else {
   842  		towerClient = p.cfg.TowerClient
   843  	}
   844  
   845  	linkCfg := htlcswitch.ChannelLinkConfig{
   846  		Peer:                    p,
   847  		DecodeHopIterators:      p.cfg.Sphinx.DecodeHopIterators,
   848  		ExtractErrorEncrypter:   p.cfg.Sphinx.ExtractErrorEncrypter,
   849  		FetchLastChannelUpdate:  p.cfg.FetchLastChanUpdate,
   850  		HodlMask:                p.cfg.Hodl.Mask(),
   851  		Registry:                p.cfg.Invoices,
   852  		BestHeight:              p.cfg.Switch.BestHeight,
   853  		Circuits:                p.cfg.Switch.CircuitModifier(),
   854  		ForwardPackets:          p.cfg.InterceptSwitch.ForwardPackets,
   855  		FwrdingPolicy:           *forwardingPolicy,
   856  		FeeEstimator:            p.cfg.FeeEstimator,
   857  		PreimageCache:           p.cfg.WitnessBeacon,
   858  		ChainEvents:             chainEvents,
   859  		UpdateContractSignals:   updateContractSignals,
   860  		OnChannelFailure:        onChannelFailure,
   861  		SyncStates:              syncStates,
   862  		BatchTicker:             ticker.New(p.cfg.ChannelCommitInterval),
   863  		FwdPkgGCTicker:          ticker.New(time.Hour),
   864  		PendingCommitTicker:     ticker.New(time.Minute),
   865  		BatchSize:               p.cfg.ChannelCommitBatchSize,
   866  		UnsafeReplay:            p.cfg.UnsafeReplay,
   867  		MinFeeUpdateTimeout:     htlcswitch.DefaultMinLinkFeeUpdateTimeout,
   868  		MaxFeeUpdateTimeout:     htlcswitch.DefaultMaxLinkFeeUpdateTimeout,
   869  		OutgoingCltvRejectDelta: p.cfg.OutgoingCltvRejectDelta,
   870  		TowerClient:             towerClient,
   871  		MaxOutgoingCltvExpiry:   p.cfg.MaxOutgoingCltvExpiry,
   872  		MaxFeeAllocation:        p.cfg.MaxChannelFeeAllocation,
   873  		MaxAnchorsCommitFeeRate: p.cfg.MaxAnchorsCommitFeeRate,
   874  		NotifyActiveLink:        p.cfg.ChannelNotifier.NotifyActiveLinkEvent,
   875  		NotifyActiveChannel:     p.cfg.ChannelNotifier.NotifyActiveChannelEvent,
   876  		NotifyInactiveChannel:   p.cfg.ChannelNotifier.NotifyInactiveChannelEvent,
   877  		HtlcNotifier:            p.cfg.HtlcNotifier,
   878  
   879  		ResetChanReestablishWaitTime: p.cfg.ChannelDB.ResetChanReestablishWaitTime,
   880  		AddToChanReestablishWaitTime: p.cfg.ChannelDB.AddToChanReestablishWaitTime,
   881  	}
   882  
   883  	// Before adding our new link, purge the switch of any pending or live
   884  	// links going by the same channel id. If one is found, we'll shut it
   885  	// down to ensure that the mailboxes are only ever under the control of
   886  	// one link.
   887  	chanID := lnwire.NewChanIDFromOutPoint(chanPoint)
   888  	p.cfg.Switch.RemoveLink(chanID)
   889  
   890  	// With the channel link created, we'll now notify the htlc switch so
   891  	// this channel can be used to dispatch local payments and also
   892  	// passively forward payments.
   893  	return p.cfg.Switch.CreateAndAddLink(linkCfg, lnChan)
   894  }
   895  
   896  // maybeSendNodeAnn sends our node announcement to the remote peer if at least
   897  // one confirmed public channel exists with them.
   898  func (p *Brontide) maybeSendNodeAnn(channels []*channeldb.OpenChannel) {
   899  	hasConfirmedPublicChan := false
   900  	for _, channel := range channels {
   901  		if channel.IsPending {
   902  			continue
   903  		}
   904  		if channel.ChannelFlags&lnwire.FFAnnounceChannel == 0 {
   905  			continue
   906  		}
   907  
   908  		hasConfirmedPublicChan = true
   909  		break
   910  	}
   911  	if !hasConfirmedPublicChan {
   912  		return
   913  	}
   914  
   915  	ourNodeAnn, err := p.cfg.GenNodeAnnouncement(false)
   916  	if err != nil {
   917  		peerLog.Debugf("Unable to retrieve node announcement: %v", err)
   918  		return
   919  	}
   920  
   921  	if err := p.SendMessageLazy(false, &ourNodeAnn); err != nil {
   922  		peerLog.Debugf("Unable to resend node announcement to %x: %v",
   923  			p.cfg.PubKeyBytes, err)
   924  	}
   925  }
   926  
   927  // WaitForDisconnect waits until the peer has disconnected. A peer may be
   928  // disconnected if the local or remote side terminates the connection, or an
   929  // irrecoverable protocol error has been encountered. This method will only
   930  // begin watching the peer's waitgroup after the ready channel or the peer's
   931  // quit channel are signaled. The ready channel should only be signaled if a
   932  // call to Start returns no error. Otherwise, if the peer fails to start,
   933  // calling Disconnect will signal the quit channel and the method will not
   934  // block, since no goroutines were spawned.
   935  func (p *Brontide) WaitForDisconnect(ready chan struct{}) {
   936  	select {
   937  	case <-ready:
   938  	case <-p.quit:
   939  	}
   940  
   941  	p.wg.Wait()
   942  }
   943  
   944  // Disconnect terminates the connection with the remote peer. Additionally, a
   945  // signal is sent to the server and htlcSwitch indicating the resources
   946  // allocated to the peer can now be cleaned up.
   947  func (p *Brontide) Disconnect(reason error) {
   948  	if !atomic.CompareAndSwapInt32(&p.disconnect, 0, 1) {
   949  		return
   950  	}
   951  
   952  	err := fmt.Errorf("disconnecting %s, reason: %v", p, reason)
   953  	p.storeError(err)
   954  
   955  	peerLog.Infof(err.Error())
   956  
   957  	// Ensure that the TCP connection is properly closed before continuing.
   958  	p.cfg.Conn.Close()
   959  
   960  	close(p.quit)
   961  }
   962  
   963  // String returns the string representation of this peer.
   964  func (p *Brontide) String() string {
   965  	return fmt.Sprintf("%x@%s", p.cfg.PubKeyBytes, p.cfg.Conn.RemoteAddr())
   966  }
   967  
   968  // readNextMessage reads and returns the next message on the wire, along
   969  // with any additional raw payload.
   970  func (p *Brontide) readNextMessage() (lnwire.Message, error) {
   971  	noiseConn := p.cfg.Conn
   972  	err := noiseConn.SetReadDeadline(time.Time{})
   973  	if err != nil {
   974  		return nil, err
   975  	}
   976  
   977  	pktLen, err := noiseConn.ReadNextHeader()
   978  	if err != nil {
   979  		return nil, err
   980  	}
   981  
   982  	// First we'll read the next _full_ message. We do this rather than
   983  	// reading incrementally from the stream as the Lightning wire protocol
   984  	// is message oriented and allows nodes to pad additional data onto
   985  	// the message stream.
   986  	var (
   987  		nextMsg lnwire.Message
   988  		msgLen  uint64
   989  	)
   990  	err = p.cfg.ReadPool.Submit(func(buf *buffer.Read) error {
   991  		// Before reading the body of the message, set the read timeout
   992  		// accordingly to ensure we don't block other readers using the
   993  		// pool. We do so only after the task has been scheduled to
   994  		// ensure the deadline doesn't expire while the message is in
   995  		// the process of being scheduled.
   996  		readDeadline := time.Now().Add(readMessageTimeout)
   997  		readErr := noiseConn.SetReadDeadline(readDeadline)
   998  		if readErr != nil {
   999  			return readErr
  1000  		}
  1001  
  1002  		// The ReadNextBody method will actually end up re-using the
  1003  		// buffer, so within this closure, we can continue to use
  1004  		// rawMsg as it's just a slice into the buf from the buffer
  1005  		// pool.
  1006  		rawMsg, readErr := noiseConn.ReadNextBody(buf[:pktLen])
  1007  		if readErr != nil {
  1008  			return readErr
  1009  		}
  1010  		msgLen = uint64(len(rawMsg))
  1011  
  1012  		// Next, create a new io.Reader implementation from the raw
  1013  		// message, and use this to decode the message directly.
  1014  		msgReader := bytes.NewReader(rawMsg)
  1015  		nextMsg, err = lnwire.ReadMessage(msgReader, 0)
  1016  		if err != nil {
  1017  			return err
  1018  		}
  1019  
  1020  		// At this point, rawMsg and buf will be returned back to the
  1021  		// buffer pool for re-use.
  1022  		return nil
  1023  	})
  1024  	atomic.AddUint64(&p.bytesReceived, msgLen)
  1025  	if err != nil {
  1026  		return nil, err
  1027  	}
  1028  
  1029  	p.logWireMessage(nextMsg, true)
  1030  
  1031  	return nextMsg, nil
  1032  }
  1033  
  1034  // msgStream implements a goroutine-safe, in-order stream of messages to be
  1035  // delivered via closure to a receiver. These messages MUST be in order due to
  1036  // the nature of the lightning channel commitment and gossiper state machines.
  1037  // TODO(conner): use stream handler interface to abstract out stream
  1038  // state/logging
  1039  type msgStream struct {
  1040  	streamShutdown int32 // To be used atomically.
  1041  
  1042  	peer *Brontide
  1043  
  1044  	apply func(lnwire.Message)
  1045  
  1046  	startMsg string
  1047  	stopMsg  string
  1048  
  1049  	msgCond *sync.Cond
  1050  	msgs    []lnwire.Message
  1051  
  1052  	mtx sync.Mutex
  1053  
  1054  	producerSema chan struct{}
  1055  
  1056  	wg   sync.WaitGroup
  1057  	quit chan struct{}
  1058  }
  1059  
  1060  // newMsgStream creates a new instance of a msgStream. bufSize is the max
  1061  // number of messages
  1062  // that should be buffered in the internal queue. Callers should set this to a
  1063  // sane value that avoids blocking unnecessarily, but doesn't allow an
  1064  // unbounded amount of memory to be allocated to buffer incoming messages.
  1065  func newMsgStream(p *Brontide, startMsg, stopMsg string, bufSize uint32,
  1066  	apply func(lnwire.Message)) *msgStream {
  1067  
  1068  	stream := &msgStream{
  1069  		peer:         p,
  1070  		apply:        apply,
  1071  		startMsg:     startMsg,
  1072  		stopMsg:      stopMsg,
  1073  		producerSema: make(chan struct{}, bufSize),
  1074  		quit:         make(chan struct{}),
  1075  	}
  1076  	stream.msgCond = sync.NewCond(&stream.mtx)
  1077  
  1078  	// Before we return the active stream, we'll populate the producer's
  1079  	// semaphore channel. We'll use this to ensure that the producer won't
  1080  	// attempt to allocate memory in the queue for an item until it has
  1081  	// sufficient extra space.
  1082  	for i := uint32(0); i < bufSize; i++ {
  1083  		stream.producerSema <- struct{}{}
  1084  	}
  1085  
  1086  	return stream
  1087  }
  1088  
  1089  // Start starts the chanMsgStream.
  1090  // Start starts the msgStream.
  1091  	ms.wg.Add(1)
  1092  	go ms.msgConsumer()
  1093  }
  1094  
  1095  // Stop stops the chanMsgStream.
  1096  // Stop stops the msgStream.
  1097  	// TODO(roasbeef): signal too?
  1098  
  1099  	close(ms.quit)
  1100  
  1101  	// Now that we've closed the channel, we'll repeatedly signal the msg
  1102  	// consumer until we've detected that it has exited.
  1103  	for atomic.LoadInt32(&ms.streamShutdown) == 0 {
  1104  		ms.msgCond.Signal()
  1105  		time.Sleep(time.Millisecond * 100)
  1106  	}
  1107  
  1108  	ms.wg.Wait()
  1109  }
  1110  
  1111  // msgConsumer is the main goroutine that streams messages from the peer's
  1112  // readHandler directly to the target channel.
  1113  func (ms *msgStream) msgConsumer() {
  1114  	defer ms.wg.Done()
  1115  	defer peerLog.Tracef(ms.stopMsg)
  1116  	defer atomic.StoreInt32(&ms.streamShutdown, 1)
  1117  
  1118  	peerLog.Tracef(ms.startMsg)
  1119  
  1120  	for {
  1121  		// First, we'll check our condition. If the queue of messages
  1122  		// is empty, then we'll wait until a new item is added.
  1123  		ms.msgCond.L.Lock()
  1124  		for len(ms.msgs) == 0 {
  1125  			ms.msgCond.Wait()
  1126  
  1127  			// If we woke up in order to exit, then we'll do so.
  1128  			// Otherwise, we'll check the message queue for any new
  1129  			// items.
  1130  			select {
  1131  			case <-ms.peer.quit:
  1132  				ms.msgCond.L.Unlock()
  1133  				return
  1134  			case <-ms.quit:
  1135  				ms.msgCond.L.Unlock()
  1136  				return
  1137  			default:
  1138  			}
  1139  		}
  1140  
  1141  		// Grab the message off the front of the queue, shifting the
  1142  		// slice's reference down one in order to remove the message
  1143  		// from the queue.
  1144  		msg := ms.msgs[0]
  1145  		ms.msgs[0] = nil // Set to nil to prevent GC leak.
  1146  		ms.msgs = ms.msgs[1:]
  1147  
  1148  		ms.msgCond.L.Unlock()
  1149  
  1150  		ms.apply(msg)
  1151  
  1152  		// We've just successfully processed an item, so we'll signal
  1153  		// to the producer that a new slot has opened up in the buffer.
  1154  		// We'll use this to bound the size of the buffer to avoid
  1155  		// allowing it to grow indefinitely.
  1156  		select {
  1157  		case ms.producerSema <- struct{}{}:
  1158  		case <-ms.peer.quit:
  1159  			return
  1160  		case <-ms.quit:
  1161  			return
  1162  		}
  1163  	}
  1164  }
  1165  
  1166  // AddMsg adds a new message to the msgStream. This function is safe for
  1167  // concurrent access.
  1168  func (ms *msgStream) AddMsg(msg lnwire.Message) {
  1169  	// First, we'll attempt to receive from the producerSema struct. This
  1170  	// acts as a semaphore to prevent us from indefinitely buffering
  1171  	// incoming items from the wire. Either the msg queue isn't full, and
  1172  	// we'll not block, or the queue is full, and we'll block until either
  1173  	// we're signalled to quit, or a slot is freed up.
  1174  	select {
  1175  	case <-ms.producerSema:
  1176  	case <-ms.peer.quit:
  1177  		return
  1178  	case <-ms.quit:
  1179  		return
  1180  	}
  1181  
  1182  	// Next, we'll lock the condition, and add the message to the end of
  1183  	// the message queue.
  1184  	ms.msgCond.L.Lock()
  1185  	ms.msgs = append(ms.msgs, msg)
  1186  	ms.msgCond.L.Unlock()
  1187  
  1188  	// With the message added, we signal to the msgConsumer that there are
  1189  	// additional messages to consume.
  1190  	ms.msgCond.Signal()
  1191  }
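
// Taken together, newMsgStream preloads bufSize tokens into producerSema,
// AddMsg consumes one token per queued message, and msgConsumer returns a
// token after each message it applies. At most bufSize messages can
// therefore sit in the queue before AddMsg blocks its caller.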
  1192  
  1193  // waitUntilLinkActive waits until the target link is active and returns a
  1194  // ChannelLink to pass messages to. It accomplishes this by subscribing to
  1195  // an ActiveLinkEvent which is emitted by the link when it first starts up.
  1196  func waitUntilLinkActive(p *Brontide,
  1197  	cid lnwire.ChannelID) htlcswitch.ChannelUpdateHandler {
  1198  
  1199  	// Subscribe to receive channel events.
  1200  	//
  1201  	// NOTE: If the link is already active by SubscribeChannelEvents, then
  1202  	// GetLink will retrieve the link and we can send messages. If the link
  1203  	// becomes active between SubscribeChannelEvents and GetLink, then GetLink
  1204  	// will retrieve the link. If the link becomes active after GetLink, then
  1205  	// we will get an ActiveLinkEvent notification and retrieve the link. If
  1206  	// the call to GetLink is before SubscribeChannelEvents, however, there
  1207  	// will be a race condition.
  1208  	sub, err := p.cfg.ChannelNotifier.SubscribeChannelEvents()
  1209  	if err != nil {
  1210  		// If we have a non-nil error, then the server is shutting down and we
  1211  		// can exit here and return nil. This means no message will be delivered
  1212  		// to the link.
  1213  		return nil
  1214  	}
  1215  	defer sub.Cancel()
  1216  
  1217  	// The link may already be active by this point, and we may have missed the
  1218  	// ActiveLinkEvent. Check if the link exists.
  1219  	link := p.fetchLinkFromKeyAndCid(cid)
  1220  	if link != nil {
  1221  		return link
  1222  	}
  1223  
  1224  	// If the link is nil, we must wait for it to be active.
  1225  	for {
  1226  		select {
  1227  		// A new event has been sent by the ChannelNotifier. We first check
  1228  		// whether the event is an ActiveLinkEvent. If it is, we'll check
  1229  		// that the event is for this channel. Otherwise, we discard the
  1230  		// message.
  1231  		case e := <-sub.Updates():
  1232  			event, ok := e.(channelnotifier.ActiveLinkEvent)
  1233  			if !ok {
  1234  				// Ignore this notification.
  1235  				continue
  1236  			}
  1237  
  1238  			chanPoint := event.ChannelPoint
  1239  
  1240  			// Check whether the retrieved chanPoint matches the target
  1241  			// channel id.
  1242  			if !cid.IsChanPoint(chanPoint) {
  1243  				continue
  1244  			}
  1245  
  1246  			// The link shouldn't be nil as we received an
  1247  			// ActiveLinkEvent. If it is nil, we return nil and the
  1248  			// calling function should catch it.
  1249  			return p.fetchLinkFromKeyAndCid(cid)
  1250  
  1251  		case <-p.quit:
  1252  			return nil
  1253  		}
  1254  	}
  1255  }
  1256  
  1257  // newChanMsgStream is used to create a msgStream between the peer and
  1258  // particular channel link in the htlcswitch. We utilize additional
  1259  // synchronization with the fundingManager to ensure we don't attempt to
  1260  // dispatch a message to a channel before it is fully active. A reference to the
  1261  // channel this stream forwards to is held in scope to prevent unnecessary
  1262  // lookups.
  1263  func newChanMsgStream(p *Brontide, cid lnwire.ChannelID) *msgStream {
  1264  
  1265  	var chanLink htlcswitch.ChannelUpdateHandler
  1266  
  1267  	apply := func(msg lnwire.Message) {
  1268  		// This check is fine because if the link no longer exists, it will
  1269  		// be removed from the activeChannels map and subsequent messages
  1270  		// shouldn't reach the chan msg stream.
  1271  		if chanLink == nil {
  1272  			chanLink = waitUntilLinkActive(p, cid)
  1273  
  1274  			// If the link is still not active and the calling function
  1275  			// errored out, just return.
  1276  			if chanLink == nil {
  1277  				return
  1278  			}
  1279  		}
  1280  
  1281  		// In order to avoid unnecessarily delivering messages
  1282  		// as the peer is exiting, we'll check quickly to see
  1283  		// if we need to exit.
  1284  		select {
  1285  		case <-p.quit:
  1286  			return
  1287  		default:
  1288  		}
  1289  
  1290  		chanLink.HandleChannelUpdate(msg)
  1291  	}
  1292  
  1293  	return newMsgStream(p,
  1294  		fmt.Sprintf("Update stream for ChannelID(%x) created", cid[:]),
  1295  		fmt.Sprintf("Update stream for ChannelID(%x) exiting", cid[:]),
  1296  		1000,
  1297  		apply,
  1298  	)
  1299  }
  1300  
  1301  // newDiscMsgStream is used to setup a msgStream between the peer and the
  1302  // authenticated gossiper. This stream should be used to forward all remote
  1303  // channel announcements.
  1304  func newDiscMsgStream(p *Brontide) *msgStream {
  1305  	apply := func(msg lnwire.Message) {
  1306  		p.cfg.AuthGossiper.ProcessRemoteAnnouncement(msg, p)
  1307  	}
  1308  
  1309  	return newMsgStream(
  1310  		p,
  1311  		"Update stream for gossiper created",
  1312  		"Update stream for gossiper exited",
  1313  		1000,
  1314  		apply,
  1315  	)
  1316  }
  1317  
  1318  // readHandler is responsible for reading messages off the wire in series, then
  1319  // properly dispatching the handling of the message to the proper subsystem.
  1320  //
  1321  // NOTE: This method MUST be run as a goroutine.
  1322  func (p *Brontide) readHandler() {
  1323  	defer p.wg.Done()
  1324  
  1325  	// We'll stop the timer after a new message is received, and also
  1326  	// reset it after we process the next message.
  1327  	idleTimer := time.AfterFunc(idleTimeout, func() {
  1328  		err := fmt.Errorf("peer %s no answer for %s -- disconnecting",
  1329  			p, idleTimeout)
  1330  		p.Disconnect(err)
  1331  	})
  1332  
  1333  	// Initialize our negotiated gossip sync method before reading messages
  1334  	// off the wire. When using gossip queries, this ensures a gossip
  1335  	// syncer is active by the time query messages arrive.
  1336  	//
  1337  	// TODO(conner): have peer store gossip syncer directly and bypass
  1338  	// gossiper?
  1339  	p.initGossipSync()
  1340  
  1341  	discStream := newDiscMsgStream(p)
  1342  	discStream.Start()
  1343  	defer discStream.Stop()
  1344  out:
  1345  	for atomic.LoadInt32(&p.disconnect) == 0 {
  1346  		nextMsg, err := p.readNextMessage()
  1347  		if !idleTimer.Stop() {
  1348  			select {
  1349  			case <-idleTimer.C:
  1350  			default:
  1351  			}
  1352  		}
  1353  		if err != nil {
  1354  			peerLog.Infof("unable to read message from %v: %v",
  1355  				p, err)
  1356  
  1357  			// If we could not read our peer's message due to an
  1358  			// unknown type or invalid alias, we continue processing
  1359  			// as normal. We store unknown message and address
  1360  			// types, as they may provide debugging insight.
  1361  			switch e := err.(type) {
  1362  			// If this is just a message we don't yet recognize,
  1363  			// we'll continue processing as normal as this allows
  1364  			// us to introduce new messages in a forwards
  1365  			// compatible manner.
  1366  			case *lnwire.UnknownMessage:
  1367  				p.storeError(e)
  1368  				idleTimer.Reset(idleTimeout)
  1369  				continue
  1370  
  1371  			// If they sent us an address type that we don't yet
  1372  			// know of, then this isn't a wire error, so we'll
  1373  			// simply continue parsing the remainder of their
  1374  			// messages.
  1375  			case *lnwire.ErrUnknownAddrType:
  1376  				p.storeError(e)
  1377  				idleTimer.Reset(idleTimeout)
  1378  				continue
  1379  
  1380  			// If the NodeAnnouncement has an invalid alias, then
  1381  			// we'll log that error above and continue so we can
  1382  			// continue to read messages from the peer. We do not
  1383  			// store this error because it is of little debugging
  1384  			// value.
  1385  			case *lnwire.ErrInvalidNodeAlias:
  1386  				idleTimer.Reset(idleTimeout)
  1387  				continue
  1388  
  1389  			// If the error we encountered wasn't just a message we
  1390  			// didn't recognize, then we'll stop all processing as
  1391  			// this is a fatal error.
  1392  			default:
  1393  				break out
  1394  			}
  1395  		}
  1396  
  1397  		var (
  1398  			targetChan   lnwire.ChannelID
  1399  			isLinkUpdate bool
  1400  		)
  1401  
  1402  		switch msg := nextMsg.(type) {
  1403  		case *lnwire.Pong:
  1404  			// When we receive a Pong message in response to our
  1405  			// last ping message, we'll use the time in which we
  1406  			// sent the ping message to measure a rough estimate of
  1407  			// round trip time.
  1408  			pingSendTime := atomic.LoadInt64(&p.pingLastSend)
  1409  			delay := (time.Now().UnixNano() - pingSendTime) / 1000
  1410  			atomic.StoreInt64(&p.pingTime, delay)
  1411  			p.handlePong()
  1412  
  1413  		case *lnwire.Ping:
  1414  			// First, we'll store their latest ping payload within
  1415  			// the relevant atomic variable.
  1416  			p.lastPingPayload.Store(msg.PaddingBytes[:])
  1417  
  1418  			// Next, we'll send over the specified number of pong
  1419  			// bytes.
  1420  			pong := lnwire.NewPong(p.cfg.PongBuf[0:msg.NumPongBytes])
  1421  			p.queueMsg(pong, nil)
  1422  
  1423  		case *lnwire.OpenChannel,
  1424  			*lnwire.AcceptChannel,
  1425  			*lnwire.FundingCreated,
  1426  			*lnwire.FundingSigned,
  1427  			*lnwire.FundingLocked:
  1428  
  1429  			p.cfg.FundingManager.ProcessFundingMsg(msg, p)
  1430  
  1431  		case *lnwire.Shutdown:
  1432  			select {
  1433  			case p.chanCloseMsgs <- &closeMsg{msg.ChannelID, msg}:
  1434  			case <-p.quit:
  1435  				break out
  1436  			}
  1437  		case *lnwire.ClosingSigned:
  1438  			select {
  1439  			case p.chanCloseMsgs <- &closeMsg{msg.ChannelID, msg}:
  1440  			case <-p.quit:
  1441  				break out
  1442  			}
  1443  
  1444  		case *lnwire.Error:
  1445  			targetChan = msg.ChanID
  1446  			isLinkUpdate = p.handleError(msg)
  1447  
  1448  		case *lnwire.ChannelReestablish:
  1449  			targetChan = msg.ChanID
  1450  			isLinkUpdate = p.isActiveChannel(targetChan)
  1451  
  1452  			// If we failed to find the link in question, and the
  1453  			// message received was a channel sync message, then
  1454  			// this might be a peer trying to resync closed channel.
  1455  			// this might be a peer trying to resync a closed channel.
  1456  			// sync message, such that the peer can recover funds
  1457  			// from the closed channel.
  1458  			if !isLinkUpdate {
  1459  				err := p.resendChanSyncMsg(targetChan)
  1460  				if err != nil {
  1461  					// TODO(halseth): send error to peer?
  1462  					peerLog.Errorf("resend failed: %v",
  1463  						err)
  1464  				}
  1465  			}
  1466  
  1467  		case LinkUpdater:
  1468  			targetChan = msg.TargetChanID()
  1469  			isLinkUpdate = p.isActiveChannel(targetChan)
  1470  
  1471  		case *lnwire.ChannelUpdate,
  1472  			*lnwire.ChannelAnnouncement,
  1473  			*lnwire.NodeAnnouncement,
  1474  			*lnwire.AnnounceSignatures,
  1475  			*lnwire.GossipTimestampRange,
  1476  			*lnwire.QueryShortChanIDs,
  1477  			*lnwire.QueryChannelRange,
  1478  			*lnwire.ReplyChannelRange,
  1479  			*lnwire.ReplyShortChanIDsEnd:
  1480  
  1481  			discStream.AddMsg(msg)
  1482  
  1483  		case *lnwire.Custom:
  1484  			err := p.handleCustomMessage(msg)
  1485  			if err != nil {
  1486  				p.storeError(err)
  1487  				peerLog.Errorf("peer: %v, %v", p, err)
  1488  			}
  1489  
  1490  		default:
  1491  			// If the message we received is unknown to us, store
  1492  			// the type to track the failure.
  1493  			err := fmt.Errorf("unknown message type %v received",
  1494  				uint16(msg.MsgType()))
  1495  			p.storeError(err)
  1496  
  1497  			peerLog.Errorf("peer: %v, %v", p, err)
  1498  		}
  1499  
  1500  		if isLinkUpdate {
  1501  			// If this is a channel update, then we need to feed it
  1502  			// into the channel's in-order message stream.
  1503  			chanStream, ok := p.activeMsgStreams[targetChan]
  1504  			if !ok {
  1505  				// If a stream hasn't yet been created, then
  1506  				// we'll do so, add it to the map, and finally
  1507  				// start it.
  1508  				chanStream = newChanMsgStream(p, targetChan)
  1509  				p.activeMsgStreams[targetChan] = chanStream
  1510  				chanStream.Start()
  1511  				defer chanStream.Stop()
  1512  			}
  1513  
  1514  			// With the stream obtained, add the message to the
  1515  			// stream so we can continue processing messages.
  1516  			chanStream.AddMsg(nextMsg)
  1517  		}
  1518  
  1519  		idleTimer.Reset(idleTimeout)
  1520  	}
  1521  
  1522  	p.Disconnect(errors.New("read handler closed"))
  1523  
  1524  	peerLog.Tracef("readHandler for peer %v done", p)
  1525  }
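
// A minimal sketch (shown in isolation, not tied to any particular peer) of
// the idle-timer idiom used in readHandler above: stop the timer, drain it if
// it already fired, then reset it once the next message has been processed.
//
//	idleTimer := time.AfterFunc(idleTimeout, func() {
//		// Timer fired: the peer has been silent for too long.
//	})
//	// ... after a message is read ...
//	if !idleTimer.Stop() {
//		select {
//		case <-idleTimer.C:
//		default:
//		}
//	}
//	// ... after the message is processed ...
//	idleTimer.Reset(idleTimeout)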
  1526  
  1527  // handleCustomMessage handles the given custom message if a handler is
  1528  // registered.
  1529  func (p *Brontide) handleCustomMessage(msg *lnwire.Custom) error {
  1530  	if p.cfg.HandleCustomMessage == nil {
  1531  		return fmt.Errorf("no custom message handler for "+
  1532  			"message type %v", uint16(msg.MsgType()))
  1533  	}
  1534  
  1535  	return p.cfg.HandleCustomMessage(p.PubKey(), msg)
  1536  }
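
// A minimal sketch of wiring up a custom message handler through the peer's
// Config; the Config literal and the [33]byte peer key parameter are
// assumptions for illustration, not taken from this file.
//
//	cfg := Config{
//		// ...
//		HandleCustomMessage: func(peer [33]byte, msg *lnwire.Custom) error {
//			peerLog.Infof("custom message type=%d from peer %x",
//				uint16(msg.MsgType()), peer)
//			return nil
//		},
//	}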
  1537  
  1538  // isActiveChannel returns true if the provided channel id is active, otherwise
  1539  // returns false.
  1540  func (p *Brontide) isActiveChannel(chanID lnwire.ChannelID) bool {
  1541  	p.activeChanMtx.RLock()
  1542  	_, ok := p.activeChannels[chanID]
  1543  	p.activeChanMtx.RUnlock()
  1544  	return ok
  1545  }
  1546  
  1547  // storeError stores an error in our peer's buffer of recent errors with the
  1548  // current timestamp. Errors are only stored if we have at least one active
  1549  // channel with the peer to mitigate a DoS vector where a peer costlessly
  1550  // connects to us and spams us with errors.
  1551  func (p *Brontide) storeError(err error) {
  1552  	var haveChannels bool
  1553  
  1554  	p.activeChanMtx.RLock()
  1555  	for _, channel := range p.activeChannels {
  1556  		// Pending channels will be nil in the activeChannels map.
  1557  		if channel == nil {
  1558  			continue
  1559  		}
  1560  
  1561  		haveChannels = true
  1562  		break
  1563  	}
  1564  	p.activeChanMtx.RUnlock()
  1565  
  1566  	// If we do not have any active channels with the peer, we do not store
  1567  	// errors as a DoS mitigation.
  1568  	if !haveChannels {
  1569  		peerLog.Tracef("no channels with peer: %v, not storing err", p)
  1570  		return
  1571  	}
  1572  
  1573  	p.cfg.ErrorBuffer.Add(
  1574  		&TimestampedError{Timestamp: time.Now(), Error: err},
  1575  	)
  1576  }
  1577  
  1578  // handleError processes an error message read from the remote peer. The boolean
  1579  // returned indicates whether the message should be delivered to a targeted peer.
  1580  // It stores the error we received from the peer in memory if we have a channel
  1581  // open with the peer.
  1582  //
  1583  // NOTE: This method should only be called from within the readHandler.
  1584  func (p *Brontide) handleError(msg *lnwire.Error) bool {
  1585  	// Store the error we have received.
  1586  	p.storeError(msg)
  1587  
  1588  	switch {
  1589  
  1590  	// In the case of an all-zero channel ID we want to forward the error to
  1591  	// all channels with this peer.
  1592  	case msg.ChanID == lnwire.ConnectionWideID:
  1593  		for _, chanStream := range p.activeMsgStreams {
  1594  			chanStream.AddMsg(msg)
  1595  		}
  1596  		return false
  1597  
  1598  	// If the channel ID for the error message corresponds to a pending
  1599  	// channel, then the funding manager will handle the error.
  1600  	case p.cfg.FundingManager.IsPendingChannel(msg.ChanID, p):
  1601  		p.cfg.FundingManager.ProcessFundingMsg(msg, p)
  1602  		return false
  1603  
  1604  	// If not we hand the error to the channel link for this channel.
  1605  	case p.isActiveChannel(msg.ChanID):
  1606  		return true
  1607  
  1608  	default:
  1609  		return false
  1610  	}
  1611  }
  1612  
  1613  // messageSummary returns a human-readable string that summarizes an
  1614  // incoming/outgoing message. Not all messages will have a summary, only those
  1615  // which have additional data that can be informative at a glance.
  1616  func messageSummary(msg lnwire.Message) string {
  1617  	switch msg := msg.(type) {
  1618  	case *lnwire.Init:
  1619  		// No summary.
  1620  		return ""
  1621  
  1622  	case *lnwire.OpenChannel:
  1623  		return fmt.Sprintf("temp_chan_id=%x, chain=%v, csv=%v, amt=%v, "+
  1624  			"push_amt=%v, reserve=%v, flags=%v",
  1625  			msg.PendingChannelID[:], msg.ChainHash,
  1626  			msg.CsvDelay, msg.FundingAmount, msg.PushAmount,
  1627  			msg.ChannelReserve, msg.ChannelFlags)
  1628  
  1629  	case *lnwire.AcceptChannel:
  1630  		return fmt.Sprintf("temp_chan_id=%x, reserve=%v, csv=%v, num_confs=%v",
  1631  			msg.PendingChannelID[:], msg.ChannelReserve, msg.CsvDelay,
  1632  			msg.MinAcceptDepth)
  1633  
  1634  	case *lnwire.FundingCreated:
  1635  		return fmt.Sprintf("temp_chan_id=%x, chan_point=%v",
  1636  			msg.PendingChannelID[:], msg.FundingPoint)
  1637  
  1638  	case *lnwire.FundingSigned:
  1639  		return fmt.Sprintf("chan_id=%v", msg.ChanID)
  1640  
  1641  	case *lnwire.FundingLocked:
  1642  		return fmt.Sprintf("chan_id=%v, next_point=%x",
  1643  			msg.ChanID, msg.NextPerCommitmentPoint.SerializeCompressed())
  1644  
  1645  	case *lnwire.Shutdown:
  1646  		return fmt.Sprintf("chan_id=%v, script=%x", msg.ChannelID,
  1647  			msg.Address[:])
  1648  
  1649  	case *lnwire.ClosingSigned:
  1650  		return fmt.Sprintf("chan_id=%v, fee_atoms=%v", msg.ChannelID,
  1651  			msg.FeeAtoms)
  1652  
  1653  	case *lnwire.UpdateAddHTLC:
  1654  		return fmt.Sprintf("chan_id=%v, id=%v, amt=%v, expiry=%v, hash=%x",
  1655  			msg.ChanID, msg.ID, msg.Amount, msg.Expiry, msg.PaymentHash[:])
  1656  
  1657  	case *lnwire.UpdateFailHTLC:
  1658  		return fmt.Sprintf("chan_id=%v, id=%v, reason=%x", msg.ChanID,
  1659  			msg.ID, msg.Reason)
  1660  
  1661  	case *lnwire.UpdateFulfillHTLC:
  1662  		return fmt.Sprintf("chan_id=%v, id=%v, pre_image=%x",
  1663  			msg.ChanID, msg.ID, msg.PaymentPreimage[:])
  1664  
  1665  	case *lnwire.CommitSig:
  1666  		return fmt.Sprintf("chan_id=%v, num_htlcs=%v", msg.ChanID,
  1667  			len(msg.HtlcSigs))
  1668  
  1669  	case *lnwire.RevokeAndAck:
  1670  		return fmt.Sprintf("chan_id=%v, rev=%x, next_point=%x",
  1671  			msg.ChanID, msg.Revocation[:],
  1672  			msg.NextRevocationKey.SerializeCompressed())
  1673  
  1674  	case *lnwire.UpdateFailMalformedHTLC:
  1675  		return fmt.Sprintf("chan_id=%v, id=%v, fail_code=%v",
  1676  			msg.ChanID, msg.ID, msg.FailureCode)
  1677  
  1678  	case *lnwire.Error:
  1679  		return fmt.Sprintf("%v", msg.Error())
  1680  
  1681  	case *lnwire.AnnounceSignatures:
  1682  		return fmt.Sprintf("chan_id=%v, short_chan_id=%s", msg.ChannelID,
  1683  			msg.ShortChannelID)
  1684  
  1685  	case *lnwire.ChannelAnnouncement:
  1686  		return fmt.Sprintf("chain_hash=%v, short_chan_id=%s",
  1687  			msg.ChainHash, msg.ShortChannelID)
  1688  
  1689  	case *lnwire.ChannelUpdate:
  1690  		return fmt.Sprintf("chain_hash=%v, short_chan_id=%s, "+
  1691  			"mflags=%v, cflags=%v, update_time=%v", msg.ChainHash,
  1692  			msg.ShortChannelID, msg.MessageFlags,
  1693  			msg.ChannelFlags, time.Unix(int64(msg.Timestamp), 0))
  1694  
  1695  	case *lnwire.NodeAnnouncement:
  1696  		return fmt.Sprintf("node=%x, update_time=%v",
  1697  			msg.NodeID, time.Unix(int64(msg.Timestamp), 0))
  1698  
  1699  	case *lnwire.Ping:
  1700  		return fmt.Sprintf("ping_bytes=%x", msg.PaddingBytes[:])
  1701  
  1702  	case *lnwire.Pong:
  1703  		return fmt.Sprintf("pong_bytes=%x", msg.PongBytes[:])
  1704  
  1705  	case *lnwire.UpdateFee:
  1706  		return fmt.Sprintf("chan_id=%v, fee_update_at=%v",
  1707  			msg.ChanID, int64(msg.FeePerKB))
  1708  
  1709  	case *lnwire.ChannelReestablish:
  1710  		return fmt.Sprintf("next_local_height=%v, remote_tail_height=%v, chan_id=%v",
  1711  			msg.NextLocalCommitHeight, msg.RemoteCommitTailHeight, msg.ChanID)
  1712  
  1713  	case *lnwire.ReplyShortChanIDsEnd:
  1714  		return fmt.Sprintf("chain_hash=%v, complete=%v", msg.ChainHash,
  1715  			msg.Complete)
  1716  
  1717  	case *lnwire.ReplyChannelRange:
  1718  		return fmt.Sprintf("start_height=%v, end_height=%v, "+
  1719  			"num_chans=%v, encoding=%v", msg.FirstBlockHeight,
  1720  			msg.LastBlockHeight(), len(msg.ShortChanIDs),
  1721  			msg.EncodingType)
  1722  
  1723  	case *lnwire.QueryShortChanIDs:
  1724  		return fmt.Sprintf("chain_hash=%v, encoding=%v, num_chans=%v",
  1725  			msg.ChainHash, msg.EncodingType, len(msg.ShortChanIDs))
  1726  
  1727  	case *lnwire.QueryChannelRange:
  1728  		return fmt.Sprintf("chain_hash=%v, start_height=%v, "+
  1729  			"end_height=%v", msg.ChainHash, msg.FirstBlockHeight,
  1730  			msg.LastBlockHeight())
  1731  
  1732  	case *lnwire.GossipTimestampRange:
  1733  		return fmt.Sprintf("chain_hash=%v, first_stamp=%v, "+
  1734  			"stamp_range=%v", msg.ChainHash,
  1735  			time.Unix(int64(msg.FirstTimestamp), 0),
  1736  			msg.TimestampRange)
  1737  
  1738  	case *lnwire.Custom:
  1739  		return fmt.Sprintf("type=%d", msg.Type)
  1740  	}
  1741  
  1742  	return ""
  1743  }
  1744  
  1745  // logWireMessage logs the receipt or sending of a particular wire message. This
  1746  // function is used rather than just logging the message in order to produce
  1747  // less spammy log messages in trace mode by setting the 'Curve' parameter to
  1748  // nil. Doing this avoids printing out each of the field elements in the curve
  1749  // parameters for secp256k1.
  1750  func (p *Brontide) logWireMessage(msg lnwire.Message, read bool) {
  1751  	summaryPrefix := "Received"
  1752  	if !read {
  1753  		summaryPrefix = "Sending"
  1754  	}
  1755  
  1756  	logger := peerLog
  1757  	switch msg.(type) {
  1758  	case *lnwire.Ping, *lnwire.Pong:
  1759  		logger = pingLog
  1760  	}
  1761  
  1762  	logger.Debugf("%v", newLogClosure(func() string {
  1763  		// Debug summary of message.
  1764  		summary := messageSummary(msg)
  1765  		if len(summary) > 0 {
  1766  			summary = "(" + summary + ")"
  1767  		}
  1768  
  1769  		preposition := "to"
  1770  		if read {
  1771  			preposition = "from"
  1772  		}
  1773  
  1774  		var msgType string
  1775  		if msg.MsgType() < lnwire.CustomTypeStart {
  1776  			msgType = msg.MsgType().String()
  1777  		} else {
  1778  			msgType = "custom"
  1779  		}
  1780  
  1781  		return fmt.Sprintf("%v %v%s %v %s", summaryPrefix,
  1782  			msgType, summary, preposition, p)
  1783  	}))
  1784  
  1785  	prefix := "readMessage from"
  1786  	if !read {
  1787  		prefix = "writeMessage to"
  1788  	}
  1789  
  1790  	logger.Tracef(prefix+" %v: %v", p, newLogClosure(func() string {
  1791  		return spew.Sdump(msg)
  1792  	}))
  1793  }
  1794  
  1795  // writeMessage writes and flushes the target lnwire.Message to the remote peer.
  1796  // If the passed message is nil, this method will only try to flush an existing
  1797  // message buffered on the connection. It is safe to call this method again
  1798  // with a nil message iff a timeout error is returned. This will continue to
  1799  // flush the pending message to the wire.
  1800  func (p *Brontide) writeMessage(msg lnwire.Message) error {
  1801  	// Simply exit if we're shutting down.
  1802  	if atomic.LoadInt32(&p.disconnect) != 0 {
  1803  		return lnpeer.ErrPeerExiting
  1804  	}
  1805  
  1806  	// Only log the message on the first attempt.
  1807  	if msg != nil {
  1808  		p.logWireMessage(msg, false)
  1809  	}
  1810  
  1811  	noiseConn := p.cfg.Conn
  1812  
  1813  	flushMsg := func() error {
  1814  		// Ensure the write deadline is set before we attempt to send
  1815  		// the message.
  1816  		writeDeadline := time.Now().Add(writeMessageTimeout)
  1817  		err := noiseConn.SetWriteDeadline(writeDeadline)
  1818  		if err != nil {
  1819  			return err
  1820  		}
  1821  
  1822  		// Flush the pending message to the wire. If an error is
  1823  		// encountered, e.g. write timeout, the number of bytes written
  1824  		// so far will be returned.
  1825  		n, err := noiseConn.Flush()
  1826  
  1827  		// Record the number of bytes written on the wire, if any.
  1828  		if n > 0 {
  1829  			atomic.AddUint64(&p.bytesSent, uint64(n))
  1830  		}
  1831  
  1832  		return err
  1833  	}
  1834  
  1835  	// If the current message has already been serialized, encrypted, and
  1836  	// buffered on the underlying connection we will skip straight to
  1837  	// flushing it to the wire.
  1838  	if msg == nil {
  1839  		return flushMsg()
  1840  	}
  1841  
  1842  	// Otherwise, this is a new message. We'll acquire a write buffer to
  1843  	// serialize the message and buffer the ciphertext on the connection.
  1844  	err := p.cfg.WritePool.Submit(func(buf *bytes.Buffer) error {
  1845  		// Using a buffer allocated by the write pool, encode the
  1846  		// message directly into the buffer.
  1847  		_, writeErr := lnwire.WriteMessage(buf, msg, 0)
  1848  		if writeErr != nil {
  1849  			return writeErr
  1850  		}
  1851  
  1852  		// Finally, write the message itself in a single swoop. This
  1853  		// will buffer the ciphertext on the underlying connection. We
  1854  		// will defer flushing the message until the write pool has been
  1855  		// released.
  1856  		return noiseConn.WriteMessage(buf.Bytes())
  1857  	})
  1858  	if err != nil {
  1859  		return err
  1860  	}
  1861  
  1862  	return flushMsg()
  1863  }
  1864  
  1865  // writeHandler is a goroutine dedicated to reading messages off of an incoming
  1866  // queue, and writing them out to the wire. This goroutine coordinates with the
  1867  // queueHandler in order to ensure the incoming message queue is quickly
  1868  // drained.
  1869  //
  1870  // NOTE: This method MUST be run as a goroutine.
  1871  func (p *Brontide) writeHandler() {
  1872  	// We'll stop the timer after a new message is sent, and also reset it
  1873  	// after we process the next message.
  1874  	idleTimer := time.AfterFunc(idleTimeout, func() {
  1875  		err := fmt.Errorf("peer %s no write for %s -- disconnecting",
  1876  			p, idleTimeout)
  1877  		p.Disconnect(err)
  1878  	})
  1879  
  1880  	var exitErr error
  1881  
  1882  	// Track the time of the last sent msg using a wall time clock instead
  1883  	// of the default monotonic clock. This is used to detect local suspend
  1884  	// scenarios, where the remote peer already disconnected from the local
  1885  	// peer but we missed the TCP FIN and thus any outbound messages will
  1886  	// be discarded. See https://github.com/golang/go/issues/36141 for a
  1887  	// discussion on how suspend affects network deadlines.
  1888  	lastSentTimeNoMono := time.Now().Round(0)
  1889  
  1890  out:
  1891  	for {
  1892  		select {
  1893  		case outMsg := <-p.sendQueue:
  1894  			// If we're about to send a ping message, then log the
  1895  			// exact time in which we send the message so we can
  1896  			// use the delay as a rough estimate of latency to the
  1897  			// remote peer.
  1898  			if _, ok := outMsg.msg.(*lnwire.Ping); ok {
  1899  				// TODO(roasbeef): do this before the write?
  1900  				// possibly account for processing within func?
  1901  				now := time.Now().UnixNano()
  1902  				atomic.StoreInt64(&p.pingLastSend, now)
  1903  			}
  1904  
  1905  			// Record the time at which we first attempt to send the
  1906  			// message.
  1907  			startTime := time.Now()
  1908  
  1909  			// If it took longer than the stall timeout time to
  1910  			// unqueue a message to send, this means we stalled and
  1911  			// the remote peer likely disconnected from us without
  1912  			// us noticing. This can happen, for example, when
  1913  			// suspending the computer, which does _not_ cause
  1914  			// network deadlines to trigger locally but causes
  1915  			// remote peers to disconnect.
  1916  			nowNoMono := startTime.Round(0)
  1917  			wallTimeInterval := nowNoMono.Sub(lastSentTimeNoMono)
  1918  			if wallTimeInterval > sendStallTimeout {
  1919  				peerLog.Warnf("Dequeuing from sendQueue took %s "+
  1920  					"while max allowed is %s. Forcibly "+
  1921  					"disconnecting from peer.", wallTimeInterval,
  1922  					sendStallTimeout)
  1923  				exitErr = fmt.Errorf("sendQueue receive stalled "+
  1924  					"for %s in writeHandler", wallTimeInterval)
  1925  				break out
  1926  			}
  1927  			lastSentTimeNoMono = nowNoMono
  1928  
  1929  		retry:
  1930  			// Write out the message to the socket. If a timeout
  1931  			// error is encountered, we will catch this and retry
  1932  			// after backing off in case the remote peer is just
  1933  			// slow to process messages from the wire.
  1934  			err := p.writeMessage(outMsg.msg)
  1935  			if nerr, ok := err.(net.Error); ok && nerr.Timeout() {
  1936  				peerLog.Debugf("Write timeout detected for "+
  1937  					"peer %s, first write for message "+
  1938  					"attempted %v ago", p,
  1939  					time.Since(startTime))
  1940  
  1941  				// If we received a timeout error, this implies
  1942  				// that the message was buffered on the
  1943  				// connection successfully and that a flush was
  1944  				// attempted. We'll set the message to nil so
  1945  				// that on a subsequent pass we only try to
  1946  				// flush the buffered message, and forgo
  1947  				// reserializing or reencrypting it.
  1948  				outMsg.msg = nil
  1949  
  1950  				goto retry
  1951  			}
  1952  
  1953  			// The write succeeded, reset the idle timer to prevent
  1954  			// us from disconnecting the peer.
  1955  			if !idleTimer.Stop() {
  1956  				select {
  1957  				case <-idleTimer.C:
  1958  				default:
  1959  				}
  1960  			}
  1961  			idleTimer.Reset(idleTimeout)
  1962  
  1963  			// If the peer requested a synchronous write, respond
  1964  			// with the error.
  1965  			if outMsg.errChan != nil {
  1966  				outMsg.errChan <- err
  1967  			}
  1968  
  1969  			if err != nil {
  1970  				exitErr = fmt.Errorf("unable to write "+
  1971  					"message: %v", err)
  1972  				break out
  1973  			}
  1974  
  1975  		case <-p.quit:
  1976  			exitErr = lnpeer.ErrPeerExiting
  1977  			break out
  1978  		}
  1979  	}
  1980  
  1981  	// Avoid an exit deadlock by ensuring WaitGroups are decremented before
  1982  	// disconnect.
  1983  	p.wg.Done()
  1984  
  1985  	p.Disconnect(exitErr)
  1986  
  1987  	peerLog.Tracef("writeHandler for peer %v done", p)
  1988  }
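
// A minimal sketch of the wall-clock stall detection used in writeHandler
// above, assuming the same sendStallTimeout constant: Round(0) strips the
// monotonic clock reading, so the interval reflects wall time even across a
// system suspend.
//
//	lastSent := time.Now().Round(0)
//	// ... each time a message is dequeued for writing ...
//	now := time.Now().Round(0)
//	if now.Sub(lastSent) > sendStallTimeout {
//		// Treat the connection as stalled and disconnect the peer.
//	}
//	lastSent = now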
  1989  
  1990  // queueHandler is responsible for accepting messages from outside subsystems
  1991  // to be eventually sent out on the wire by the writeHandler.
  1992  //
  1993  // NOTE: This method MUST be run as a goroutine.
  1994  func (p *Brontide) queueHandler() {
  1995  	defer p.wg.Done()
  1996  
  1997  	// priorityMsgs holds an in-order list of messages deemed high-priority
  1998  	// to be added to the sendQueue. This predominantly includes messages
  1999  	// from the funding manager and htlcswitch.
  2000  	priorityMsgs := list.New()
  2001  
  2002  	// lazyMsgs holds an in-order list of messages deemed low-priority to be
  2003  	// added to the sendQueue only after all high-priority messages have
  2004  	// been queued. This predominantly includes messages from the gossiper.
  2005  	lazyMsgs := list.New()
  2006  
  2007  	for {
  2008  		// Examine the front of the priority queue, if it is empty check
  2009  		// the low priority queue.
  2010  		elem := priorityMsgs.Front()
  2011  		if elem == nil {
  2012  			elem = lazyMsgs.Front()
  2013  		}
  2014  
  2015  		if elem != nil {
  2016  			front := elem.Value.(outgoingMsg)
  2017  
  2018  			// There's an element on the queue, try adding
  2019  			// it to the sendQueue. We also watch for
  2020  			// messages on the outgoingQueue, in case the
  2021  			// writeHandler cannot accept messages on the
  2022  			// sendQueue.
  2023  			select {
  2024  			case p.sendQueue <- front:
  2025  				if front.priority {
  2026  					priorityMsgs.Remove(elem)
  2027  				} else {
  2028  					lazyMsgs.Remove(elem)
  2029  				}
  2030  			case msg := <-p.outgoingQueue:
  2031  				if msg.priority {
  2032  					priorityMsgs.PushBack(msg)
  2033  				} else {
  2034  					lazyMsgs.PushBack(msg)
  2035  				}
  2036  			case <-p.quit:
  2037  				return
  2038  			}
  2039  		} else {
  2040  			// If there weren't any messages to send to the
  2041  			// writeHandler, then we'll accept a new message
  2042  			// into the queue from outside sub-systems.
  2043  			select {
  2044  			case msg := <-p.outgoingQueue:
  2045  				if msg.priority {
  2046  					priorityMsgs.PushBack(msg)
  2047  				} else {
  2048  					lazyMsgs.PushBack(msg)
  2049  				}
  2050  			case <-p.quit:
  2051  				return
  2052  			}
  2053  		}
  2054  	}
  2055  }
  2056  
  2057  // pingHandler is responsible for periodically sending ping messages to the
  2058  // remote peer in order to keep the connection alive and/or determine if the
  2059  // connection is still active.
  2060  //
  2061  // NOTE: This method MUST be run as a goroutine.
  2062  func (p *Brontide) pingHandler() {
  2063  	defer p.wg.Done()
  2064  
  2065  	pingTicker := time.NewTicker(pingInterval)
  2066  	defer pingTicker.Stop()
  2067  
  2068  	// TODO(roasbeef): make dynamic in order to create fake cover traffic
  2069  	const numPongBytes = 16
  2070  
  2071  	blockEpochs, err := p.cfg.ChainNotifier.RegisterBlockEpochNtfn(nil)
  2072  	if err != nil {
  2073  		peerLog.Errorf("unable to establish block epoch "+
  2074  			"subscription: %v", err)
  2075  		return
  2076  	}
  2077  	defer blockEpochs.Cancel()
  2078  
  2079  	var (
  2080  		pingPayload [wire.MaxBlockHeaderPayload]byte
  2081  		blockHeader *wire.BlockHeader
  2082  	)
  2083  out:
  2084  	for {
  2085  		select {
  2086  		// Each time a new block comes in, we'll copy the raw header
  2087  		// contents over to our ping payload declared above. Over time,
  2088  		// we'll use this to disseminate the latest block header
  2089  		// between all our peers, which can later be used to
  2090  		// cross-check our own view of the network to mitigate various
  2091  		// types of eclipse attacks.
  2092  		case epoch, ok := <-blockEpochs.Epochs:
  2093  			if !ok {
  2094  				peerLog.Debugf("block notifications " +
  2095  					"canceled")
  2096  				return
  2097  			}
  2098  
  2099  			blockHeader = epoch.BlockHeader
  2100  			headerBuf := bytes.NewBuffer(pingPayload[0:0])
  2101  			err := blockHeader.Serialize(headerBuf)
  2102  			if err != nil {
  2103  				peerLog.Errorf("unable to encode header: %v",
  2104  					err)
  2105  			}
  2106  
  2107  		case <-pingTicker.C:
  2108  			pingMsg := &lnwire.Ping{
  2109  				NumPongBytes: numPongBytes,
  2110  				PaddingBytes: pingPayload[:],
  2111  			}
  2112  
  2113  			p.queueMsg(pingMsg, nil)
  2114  
  2115  		case <-p.enforcePingChan:
  2116  			pingTicker.Reset(pingInterval)
  2117  
  2118  			pingMsg := &lnwire.Ping{
  2119  				NumPongBytes: numPongBytes,
  2120  				PaddingBytes: pingPayload[:],
  2121  			}
  2122  
  2123  			p.queueMsg(pingMsg, nil)
  2124  
  2125  		case <-p.quit:
  2126  			break out
  2127  		}
  2128  	}
  2129  }
  2130  
  2131  // PingTime returns the estimated ping time to the peer in microseconds.
  2132  func (p *Brontide) PingTime() int64 {
  2133  	return atomic.LoadInt64(&p.pingTime)
  2134  }
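
// A small illustration of how the value returned by PingTime is produced in
// readHandler above: the elapsed time between recording the ping send and
// receiving the matching pong, converted from nanoseconds to microseconds.
//
//	sentNanos := atomic.LoadInt64(&p.pingLastSend)
//	delayMicros := (time.Now().UnixNano() - sentNanos) / 1000
//	atomic.StoreInt64(&p.pingTime, delayMicros)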
  2135  
  2136  // queueMsg adds the lnwire.Message to the back of the high priority send queue.
  2137  // If the errChan is non-nil, an error is sent back if the msg failed to queue
  2138  // or failed to write, and nil otherwise.
  2139  func (p *Brontide) queueMsg(msg lnwire.Message, errChan chan error) {
  2140  	p.queue(true, msg, errChan)
  2141  }
  2142  
  2143  // queueMsgLazy adds the lnwire.Message to the back of the low priority send
  2144  // queue. If the errChan is non-nil, an error is sent back if the msg failed to
  2145  // queue or failed to write, and nil otherwise.
  2146  func (p *Brontide) queueMsgLazy(msg lnwire.Message, errChan chan error) {
  2147  	p.queue(false, msg, errChan)
  2148  }
  2149  
  2150  // queue sends a given message to the queueHandler using the passed priority. If
  2151  // the errChan is non-nil, an error is sent back if the msg failed to queue or
  2152  // failed to write, and nil otherwise.
  2153  func (p *Brontide) queue(priority bool, msg lnwire.Message,
  2154  	errChan chan error) {
  2155  
  2156  	select {
  2157  	case p.outgoingQueue <- outgoingMsg{priority, msg, errChan}:
  2158  	case <-p.quit:
  2159  		peerLog.Tracef("Peer shutting down, could not enqueue msg: %v.",
  2160  			spew.Sdump(msg))
  2161  		if errChan != nil {
  2162  			errChan <- lnpeer.ErrPeerExiting
  2163  		}
  2164  	}
  2165  }
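
// A minimal usage sketch for the queueing helpers above (the call site and
// payload are hypothetical): supplying a buffered errChan makes the send
// effectively synchronous for the caller.
//
//	errChan := make(chan error, 1)
//	p.queueMsg(lnwire.NewPong(make([]byte, 16)), errChan)
//	if err := <-errChan; err != nil {
//		peerLog.Errorf("unable to send pong to %v: %v", p, err)
//	}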
  2166  
  2167  // ChannelSnapshots returns a slice of channel snapshots detailing all
  2168  // currently active channels maintained with the remote peer.
  2169  func (p *Brontide) ChannelSnapshots() []*channeldb.ChannelSnapshot {
  2170  	p.activeChanMtx.RLock()
  2171  	defer p.activeChanMtx.RUnlock()
  2172  
  2173  	snapshots := make([]*channeldb.ChannelSnapshot, 0, len(p.activeChannels))
  2174  	for _, activeChan := range p.activeChannels {
  2175  		// If the activeChan is nil, then we skip it as the channel is pending.
  2176  		if activeChan == nil {
  2177  			continue
  2178  		}
  2179  
  2180  		// We'll only return a snapshot for channels that are
  2181  		// *immediately* available for routing payments over.
  2182  		if activeChan.RemoteNextRevocation() == nil {
  2183  			continue
  2184  		}
  2185  
  2186  		snapshot := activeChan.StateSnapshot()
  2187  		snapshots = append(snapshots, snapshot)
  2188  	}
  2189  
  2190  	return snapshots
  2191  }
  2192  
  2193  // genDeliveryScript returns a new script to which our funds will be sent in
  2194  // the case of a cooperative channel close negotiation.
  2195  func (p *Brontide) genDeliveryScript() ([]byte, error) {
  2196  	deliveryAddr, err := p.cfg.Wallet.NewAddress(
  2197  		lnwallet.PubKeyHash, false, lnwallet.DefaultAccountName,
  2198  	)
  2199  	if err != nil {
  2200  		return nil, err
  2201  	}
  2202  	peerLog.Infof("Delivery addr for channel close: %v",
  2203  		deliveryAddr)
  2204  
  2205  	return input.PayToAddrScript(deliveryAddr)
  2206  }
  2207  
  2208  // channelManager is a goroutine dedicated to handling all requests/signals
  2209  // pertaining to the opening, cooperative closing, and force closing of all
  2210  // channels maintained with the remote peer.
  2211  //
  2212  // NOTE: This method MUST be run as a goroutine.
  2213  func (p *Brontide) channelManager() {
  2214  	defer p.wg.Done()
  2215  
  2216  	// reenableTimeout will fire once after the configured channel status
  2217  	// interval has elapsed. This will trigger us to sign new channel
  2218  	// updates and broadcast them with the "disabled" flag unset.
  2219  	reenableTimeout := time.After(p.cfg.ChanActiveTimeout)
  2220  
  2221  out:
  2222  	for {
  2223  		select {
  2224  		// A new channel has arrived which means we've just completed a
  2225  		// funding workflow. We'll initialize the necessary local
  2226  		// state, and notify the htlc switch of a new link.
  2227  		case newChanReq := <-p.newChannels:
  2228  			newChan := newChanReq.channel
  2229  			chanPoint := &newChan.FundingOutpoint
  2230  			chanID := lnwire.NewChanIDFromOutPoint(chanPoint)
  2231  
  2232  			// Only update RemoteNextRevocation if the channel is in the
  2233  			// activeChannels map and if we added the link to the switch.
  2234  			// Only active channels will be added to the switch.
  2235  			p.activeChanMtx.Lock()
  2236  			currentChan, ok := p.activeChannels[chanID]
  2237  			if ok && currentChan != nil {
  2238  				peerLog.Infof("Already have ChannelPoint(%v), "+
  2239  					"ignoring.", chanPoint)
  2240  
  2241  				p.activeChanMtx.Unlock()
  2242  				close(newChanReq.err)
  2243  
  2244  				// If we're being sent a new channel, and our
  2245  				// existing channel doesn't have the next
  2246  				// revocation, then we need to update the
  2247  				// current existing channel.
  2248  				if currentChan.RemoteNextRevocation() != nil {
  2249  					continue
  2250  				}
  2251  
  2252  				peerLog.Infof("Processing retransmitted "+
  2253  					"FundingLocked for ChannelPoint(%v)",
  2254  					chanPoint)
  2255  
  2256  				nextRevoke := newChan.RemoteNextRevocation
  2257  				err := currentChan.InitNextRevocation(nextRevoke)
  2258  				if err != nil {
  2259  					peerLog.Errorf("unable to init chan "+
  2260  						"revocation: %v", err)
  2261  					continue
  2262  				}
  2263  
  2264  				continue
  2265  			}
  2266  
  2267  			// If not already active, we'll add this channel to the
  2268  			// set of active channels, so we can look it up later
  2269  			// easily according to its channel ID.
  2270  			lnChan, err := lnwallet.NewLightningChannel(
  2271  				p.cfg.Signer, newChan, p.cfg.SigPool,
  2272  				p.cfg.ChainParams,
  2273  			)
  2274  			if err != nil {
  2275  				p.activeChanMtx.Unlock()
  2276  				err := fmt.Errorf("unable to create "+
  2277  					"LightningChannel: %v", err)
  2278  				peerLog.Errorf(err.Error())
  2279  
  2280  				newChanReq.err <- err
  2281  				continue
  2282  			}
  2283  
  2284  			// This refreshes the activeChannels entry if the link was not in
  2285  			// the switch, and also populates it for new entries.
  2286  			p.activeChannels[chanID] = lnChan
  2287  			p.addedChannels[chanID] = struct{}{}
  2288  			p.activeChanMtx.Unlock()
  2289  
  2290  			peerLog.Infof("New channel active ChannelPoint(%v) "+
  2291  				"with NodeKey(%x)", chanPoint, p.PubKey())
  2292  
  2293  			// Next, we'll assemble a ChannelLink along with the
  2294  			// necessary items it needs to function.
  2295  			//
  2296  			// TODO(roasbeef): panic on below?
  2297  			chainEvents, err := p.cfg.ChainArb.SubscribeChannelEvents(
  2298  				*chanPoint,
  2299  			)
  2300  			if err != nil {
  2301  				err := fmt.Errorf("unable to subscribe to "+
  2302  					"chain events: %v", err)
  2303  				peerLog.Errorf(err.Error())
  2304  
  2305  				newChanReq.err <- err
  2306  				continue
  2307  			}
  2308  
  2309  			// We'll query the localChanCfg of the new channel to determine the
  2310  			// minimum HTLC value that can be forwarded. For the maximum HTLC
  2311  			// value that can be forwarded and fees we'll use the default
  2312  			// values, as they currently are always set to the default values
  2313  			// at initial channel creation. Note that the maximum HTLC value
  2314  			// defaults to the cap on the total value of outstanding HTLCs.
  2315  			fwdMinHtlc := lnChan.FwdMinHtlc()
  2316  			defaultPolicy := p.cfg.RoutingPolicy
  2317  			forwardingPolicy := &htlcswitch.ForwardingPolicy{
  2318  				MinHTLCOut:    fwdMinHtlc,
  2319  				MaxHTLC:       newChan.LocalChanCfg.MaxPendingAmount,
  2320  				BaseFee:       defaultPolicy.BaseFee,
  2321  				FeeRate:       defaultPolicy.FeeRate,
  2322  				TimeLockDelta: defaultPolicy.TimeLockDelta,
  2323  			}
  2324  
  2325  			// If we've reached this point, there are two possible scenarios.
  2326  			// If the channel was in the active channels map as nil, then it
  2327  			// was loaded from disk and we need to send reestablish. Else,
  2328  			// it was not loaded from disk and we don't need to send
  2329  			// reestablish as this is a fresh channel.
  2330  			shouldReestablish := ok
  2331  
  2332  			// Create the link and add it to the switch.
  2333  			err = p.addLink(
  2334  				chanPoint, lnChan, forwardingPolicy,
  2335  				chainEvents, shouldReestablish,
  2336  			)
  2337  			if err != nil {
  2338  				err := fmt.Errorf("can't register new channel "+
  2339  					"link(%v) with NodeKey(%x)", chanPoint,
  2340  					p.PubKey())
  2341  				peerLog.Errorf(err.Error())
  2342  
  2343  				newChanReq.err <- err
  2344  				continue
  2345  			}
  2346  
  2347  			close(newChanReq.err)
  2348  
  2349  		// We've just received a local request to close an active
  2350  		// channel. It will either kick off a cooperative channel
  2351  		// closure negotiation, or be a notification of a breached
  2352  		// contract that should be abandoned.
  2353  		case req := <-p.localCloseChanReqs:
  2354  			p.handleLocalCloseReq(req)
  2355  
  2356  		// We've received a link failure from a link that was added to
  2357  		// the switch. This will initiate the teardown of the link, and
  2358  		// initiate any on-chain closures if necessary.
  2359  		case failure := <-p.linkFailures:
  2360  			p.handleLinkFailure(failure)
  2361  
  2362  		// We've received a new cooperative channel closure related
  2363  		// message from the remote peer, we'll use this message to
  2364  		// advance the chan closer state machine.
  2365  		case closeMsg := <-p.chanCloseMsgs:
  2366  			p.handleCloseMsg(closeMsg)
  2367  
  2368  		// The channel reannounce delay has elapsed, broadcast the
  2369  		// reenabled channel updates to the network. This should only
  2370  		// fire once, so we set the reenableTimeout channel to nil to
  2371  		// mark it for garbage collection. If the peer is torn down
  2372  		// before firing, reenabling will not be attempted.
  2373  		// TODO(conner): consolidate reenables timers inside chan status
  2374  		// manager
  2375  		case <-reenableTimeout:
  2376  			p.reenableActiveChannels()
  2377  
  2378  			// Since this channel will never fire again during the
  2379  			// lifecycle of the peer, we nil the channel to mark it
  2380  			// eligible for garbage collection, and make this
  2381  			// explicitly ineligible to receive in future calls to
  2382  			// select. This also shaves a few CPU cycles since the
  2383  			// select will ignore this case entirely.
  2384  			reenableTimeout = nil
  2385  
  2386  		case <-p.quit:
  2387  			// As we've been signalled to exit, we'll reset all
  2388  			// our active channels back to their default state.
  2389  			p.activeChanMtx.Lock()
  2390  			for _, channel := range p.activeChannels {
  2391  				// If the channel is nil, continue as it's a pending channel.
  2392  				if channel == nil {
  2393  					continue
  2394  				}
  2395  
  2396  				channel.ResetState()
  2397  			}
  2398  			p.activeChanMtx.Unlock()
  2399  
  2400  			break out
  2401  		}
  2402  	}
  2403  }
  2404  
  2405  // reenableActiveChannels searches the index of channels maintained with this
  2406  // peer, and reenables each public, non-pending channel. This is done at the
  2407  // gossip level by broadcasting a new ChannelUpdate with the disabled bit unset.
  2408  // No message will be sent if the channel is already enabled.
  2409  func (p *Brontide) reenableActiveChannels() {
  2410  	// First, filter all known channels with this peer for ones that are
  2411  	// both public and not pending.
  2412  	var activePublicChans []wire.OutPoint
  2413  	p.activeChanMtx.RLock()
  2414  	for chanID, lnChan := range p.activeChannels {
  2415  		// If the lnChan is nil, continue as this is a pending channel.
  2416  		if lnChan == nil {
  2417  			continue
  2418  		}
  2419  
  2420  		dbChan := lnChan.State()
  2421  		isPublic := dbChan.ChannelFlags&lnwire.FFAnnounceChannel != 0
  2422  		if !isPublic || dbChan.IsPending {
  2423  			continue
  2424  		}
  2425  
  2426  		// We'll also skip any channels added during this peer's
  2427  		// lifecycle since they haven't waited out the timeout. Their
  2428  		// first announcement will be enabled, and the chan status
  2429  		// manager will begin monitoring them passively since they exist
  2430  		// in the database.
  2431  		if _, ok := p.addedChannels[chanID]; ok {
  2432  			continue
  2433  		}
  2434  
  2435  		activePublicChans = append(
  2436  			activePublicChans, dbChan.FundingOutpoint,
  2437  		)
  2438  	}
  2439  	p.activeChanMtx.RUnlock()
  2440  
  2441  	// For each of the public, non-pending channels, set the channel
  2442  	// disabled bit to false and send out a new ChannelUpdate. If this
  2443  	// channel is already active, the update won't be sent.
  2444  	for _, chanPoint := range activePublicChans {
  2445  		err := p.cfg.ChanStatusMgr.RequestEnable(chanPoint, false)
  2446  		if err == netann.ErrEnableManuallyDisabledChan {
  2447  			peerLog.Debugf("Channel(%v) was manually disabled, ignoring "+
  2448  				"automatic enable request", chanPoint)
  2449  		} else if err != nil {
  2450  			peerLog.Errorf("Unable to enable channel %v: %v",
  2451  				chanPoint, err)
  2452  		}
  2453  	}
  2454  }
  2455  
  2456  // fetchActiveChanCloser attempts to fetch the active chan closer state machine
  2457  // for the target channel ID. If the channel isn't active an error is returned.
  2458  // for the target channel ID. If the channel isn't active, an error is returned.
  2459  // will be created.
  2460  func (p *Brontide) fetchActiveChanCloser(chanID lnwire.ChannelID) (
  2461  	*chancloser.ChanCloser, error) {
  2462  
  2463  	// First, we'll ensure that we actually know of the target channel. If
  2464  	// not, we'll ignore this message.
  2465  	p.activeChanMtx.RLock()
  2466  	channel, ok := p.activeChannels[chanID]
  2467  	p.activeChanMtx.RUnlock()
  2468  
  2469  	// If the channel isn't in the map or the channel is nil, return
  2470  	// ErrChannelNotFound as the channel is pending.
  2471  	if !ok || channel == nil {
  2472  		return nil, ErrChannelNotFound
  2473  	}
  2474  
  2475  	// We'll attempt to look up the matching state machine, if we can't
  2476  	// find one then this means that the remote party is initiating a
  2477  	// cooperative channel closure.
  2478  	chanCloser, ok := p.activeChanCloses[chanID]
  2479  	if !ok {
  2480  		// Optimistically try a link shutdown, erroring out if it
  2481  		// failed.
  2482  		if err := p.tryLinkShutdown(chanID); err != nil {
  2483  			peerLog.Errorf("failed link shutdown: %v", err)
  2484  			return nil, err
  2485  		}
  2486  
  2487  		// We'll create a valid closing state machine in order to
  2488  		// respond to the initiated cooperative channel closure. First,
  2489  		// we set the delivery script that our funds will be paid out
  2490  		// to. If an upfront shutdown script was set, we will use it.
  2491  		// Otherwise, we get a fresh delivery script.
  2492  		//
  2493  		// TODO: Expose option to allow upfront shutdown script from
  2494  		// watch-only accounts.
  2495  		deliveryScript := channel.LocalUpfrontShutdownScript()
  2496  		if len(deliveryScript) == 0 {
  2497  			var err error
  2498  			deliveryScript, err = p.genDeliveryScript()
  2499  			if err != nil {
  2500  				peerLog.Errorf("unable to gen delivery script: %v", err)
  2501  				return nil, fmt.Errorf("close addr unavailable")
  2502  			}
  2503  		}
  2504  
  2505  		// In order to begin fee negotiations, we'll first compute our
  2506  		// target ideal fee-per-kb. We'll set this to a lax value, as
  2507  		// we weren't the ones that initiated the channel closure.
  2508  		feePerKB, err := p.cfg.FeeEstimator.EstimateFeePerKB(
  2509  			p.cfg.CoopCloseTargetConfs,
  2510  		)
  2511  		if err != nil {
  2512  			peerLog.Errorf("unable to query fee estimator: %v", err)
  2513  
  2514  			return nil, fmt.Errorf("unable to estimate fee")
  2515  		}
  2516  
  2517  		_, startingHeight, err := p.cfg.ChainIO.GetBestBlock()
  2518  		if err != nil {
  2519  			peerLog.Errorf("unable to obtain best block: %v", err)
  2520  			return nil, fmt.Errorf("cannot obtain best block")
  2521  		}
  2522  
  2523  		chanCloser = chancloser.NewChanCloser(
  2524  			chancloser.ChanCloseCfg{
  2525  				Channel:     channel,
  2526  				BroadcastTx: p.cfg.Wallet.PublishTransaction,
  2527  				DisableChannel: func(chanPoint wire.OutPoint) error {
  2528  					return p.cfg.ChanStatusMgr.RequestDisable(chanPoint, false)
  2529  				},
  2530  				Disconnect: func() error {
  2531  					return p.cfg.DisconnectPeer(p.IdentityKey())
  2532  				},
  2533  				Quit: p.quit,
  2534  			},
  2535  			deliveryScript,
  2536  			feePerKB,
  2537  			uint32(startingHeight),
  2538  			nil,
  2539  			false,
  2540  		)
  2541  		p.activeChanCloses[chanID] = chanCloser
  2542  	}
  2543  
  2544  	return chanCloser, nil
  2545  }
  2546  
  2547  // chooseDeliveryScript takes two optionally set shutdown scripts and returns
  2548  // a suitable script to close out to. This may be nil if neither script is
  2549  // set. If both scripts are set, this function will error if they do not match.
  2550  func chooseDeliveryScript(upfront,
  2551  	requested lnwire.DeliveryAddress) (lnwire.DeliveryAddress, error) {
  2552  
  2553  	// If no upfront shutdown script was provided, return the user
  2554  	// requested address (which may be nil).
  2555  	if len(upfront) == 0 {
  2556  		return requested, nil
  2557  	}
  2558  
  2559  	// If an upfront shutdown script was provided, and the user did not request
  2560  	// a custom shutdown script, return the upfront address.
  2561  	if len(requested) == 0 {
  2562  		return upfront, nil
  2563  	}
  2564  
  2565  	// If both an upfront shutdown script and a custom close script were
  2566  	// provided, error if the user provided shutdown script does not match
  2567  	// the upfront shutdown script (because closing out to a different script
  2568  	// would violate upfront shutdown).
  2569  	if !bytes.Equal(upfront, requested) {
  2570  		return nil, chancloser.ErrUpfrontShutdownScriptMismatch
  2571  	}
  2572  
  2573  	// The user requested script matches the upfront shutdown script, so we
  2574  	// can return it without error.
  2575  	return upfront, nil
  2576  }
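
// A short illustration of chooseDeliveryScript's matching rules; the script
// bytes below are hypothetical placeholders.
//
//	upfront := lnwire.DeliveryAddress{0x00, 0x14}
//	requested := lnwire.DeliveryAddress{0x00, 0x20}
//
//	// Only the upfront script is set: it is returned as-is.
//	script, _ := chooseDeliveryScript(upfront, nil)
//
//	// Both are set but differ: ErrUpfrontShutdownScriptMismatch is returned.
//	_, err := chooseDeliveryScript(upfront, requested)
//
//	// Neither is set: a nil script and nil error are returned.
//	script, err = chooseDeliveryScript(nil, nil)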
  2577  
  2578  // handleLocalCloseReq kicks off the workflow to execute a cooperative or
  2579  // forced unilateral closure of the channel initiated by a local subsystem.
  2580  func (p *Brontide) handleLocalCloseReq(req *htlcswitch.ChanClose) {
  2581  	chanID := lnwire.NewChanIDFromOutPoint(req.ChanPoint)
  2582  
  2583  	p.activeChanMtx.RLock()
  2584  	channel, ok := p.activeChannels[chanID]
  2585  	p.activeChanMtx.RUnlock()
  2586  
  2587  	// Though this function can't be called for pending channels, we still
  2588  	// check whether the channel is nil for safety.
  2589  	if !ok || channel == nil {
  2590  		err := fmt.Errorf("unable to close channel, ChannelID(%v) is "+
  2591  			"unknown", chanID)
  2592  		peerLog.Errorf(err.Error())
  2593  		req.Err <- err
  2594  		return
  2595  	}
  2596  
  2597  	switch req.CloseType {
  2598  
  2599  	// A type of CloseRegular indicates that the user has opted to close
  2600  	// out this channel on-chain, so we execute the cooperative channel
  2601  	// closure workflow.
  2602  	case contractcourt.CloseRegular:
  2603  		// First, we'll choose a delivery address that we'll use to send the
  2604  		// funds to in the case of a successful negotiation.
  2605  
  2606  		// An upfront shutdown and user provided script are both optional,
  2607  		// but must be equal if both are set (because we cannot serve a request
  2608  		// to close out to a script which violates upfront shutdown). Get the
  2609  		// appropriate address to close out to (which may be nil if neither
  2610  		// are set) and error if they are both set and do not match.
  2611  		deliveryScript, err := chooseDeliveryScript(
  2612  			channel.LocalUpfrontShutdownScript(), req.DeliveryScript,
  2613  		)
  2614  		if err != nil {
  2615  			peerLog.Errorf("cannot close channel %v: %v", req.ChanPoint, err)
  2616  			req.Err <- err
  2617  			return
  2618  		}
  2619  
  2620  		// If neither an upfront address or a user set address was
  2621  		// provided, generate a fresh script.
  2622  		if len(deliveryScript) == 0 {
  2623  			deliveryScript, err = p.genDeliveryScript()
  2624  			if err != nil {
  2625  				peerLog.Errorf(err.Error())
  2626  				req.Err <- err
  2627  				return
  2628  			}
  2629  		}
  2630  
  2631  		// Next, we'll create a new channel closer state machine to
  2632  		// handle the close negotiation.
  2633  		_, startingHeight, err := p.cfg.ChainIO.GetBestBlock()
  2634  		if err != nil {
  2635  			peerLog.Errorf(err.Error())
  2636  			req.Err <- err
  2637  			return
  2638  		}
  2639  
  2640  		// Optimistically try a link shutdown, erroring out if it
  2641  		// failed.
  2642  		if err := p.tryLinkShutdown(chanID); err != nil {
  2643  			peerLog.Errorf("failed link shutdown: %v", err)
  2644  			req.Err <- err
  2645  			return
  2646  		}
  2647  
  2648  		chanCloser := chancloser.NewChanCloser(
  2649  			chancloser.ChanCloseCfg{
  2650  				Channel:     channel,
  2651  				BroadcastTx: p.cfg.Wallet.PublishTransaction,
  2652  				DisableChannel: func(chanPoint wire.OutPoint) error {
  2653  					return p.cfg.ChanStatusMgr.RequestDisable(chanPoint, false)
  2654  				},
  2655  				Disconnect: func() error {
  2656  					return p.cfg.DisconnectPeer(p.IdentityKey())
  2657  				},
  2658  				Quit: p.quit,
  2659  			},
  2660  			deliveryScript,
  2661  			req.TargetFeePerKB,
  2662  			uint32(startingHeight),
  2663  			req,
  2664  			true,
  2665  		)
  2666  		p.activeChanCloses[chanID] = chanCloser
  2667  
  2668  		// Finally, we'll initiate the channel shutdown within the
  2669  		// chanCloser, and send the shutdown message to the remote
  2670  		// party to kick things off.
  2671  		shutdownMsg, err := chanCloser.ShutdownChan()
  2672  		if err != nil {
  2673  			peerLog.Errorf(err.Error())
  2674  			req.Err <- err
  2675  			delete(p.activeChanCloses, chanID)
  2676  
  2677  			// As we were unable to shutdown the channel, we'll
  2678  			// return it back to its normal state.
  2679  			channel.ResetState()
  2680  			return
  2681  		}
  2682  
  2683  		p.queueMsg(shutdownMsg, nil)
  2684  
  2685  	// A type of CloseBreach indicates that the counterparty has breached
  2686  	// the channel therefore we need to clean up our local state.
  2687  	case contractcourt.CloseBreach:
  2688  		// TODO(roasbeef): no longer needed with newer breach logic?
  2689  		peerLog.Infof("ChannelPoint(%v) has been breached, wiping "+
  2690  			"channel", req.ChanPoint)
  2691  		p.WipeChannel(req.ChanPoint)
  2692  	}
  2693  }
  2694  
  2695  // linkFailureReport is sent to the channelManager whenever a link reports a
  2696  // link failure, and is forced to exit. The report houses the necessary
  2697  // information to clean up the channel state, send back the error message, and
  2698  // force close if necessary.
  2699  type linkFailureReport struct {
  2700  	chanPoint   wire.OutPoint
  2701  	chanID      lnwire.ChannelID
  2702  	shortChanID lnwire.ShortChannelID
  2703  	linkErr     htlcswitch.LinkFailureError
  2704  }
  2705  
  2706  // handleLinkFailure processes a link failure report when a link in the switch
  2707  // fails. It removes all channel state within the peer, force closes the
  2708  // channel depending on severity, and sends the error message back to the
  2709  // remote party.
  2710  func (p *Brontide) handleLinkFailure(failure linkFailureReport) {
  2711  	// Retrieve the channel from the map of active channels. We do this to
  2712  	// have access to it even after WipeChannel removes it from the map.
  2713  	chanID := lnwire.NewChanIDFromOutPoint(&failure.chanPoint)
  2714  	p.activeChanMtx.Lock()
  2715  	lnChan := p.activeChannels[chanID]
  2716  	p.activeChanMtx.Unlock()
  2717  
  2718  	// We begin by wiping the link, which will remove it from the switch,
  2719  	// such that it won't be used for any more updates.
  2720  	//
  2721  	// TODO(halseth): should introduce a way to atomically stop/pause the
  2722  	// link and cancel back any adds in its mailboxes such that we can
  2723  	// safely force close without the link being added again and updates
  2724  	// being applied.
  2725  	p.WipeChannel(&failure.chanPoint)
  2726  
  2727  	// If the error encountered was severe enough, we'll now force close the
  2728  	// channel to prevent re-adding it to the switch in the future.
  2729  	if failure.linkErr.ForceClose {
  2730  		peerLog.Warnf("Force closing link(%v)",
  2731  			failure.shortChanID)
  2732  
  2733  		closeTx, err := p.cfg.ChainArb.ForceCloseContract(
  2734  			failure.chanPoint,
  2735  		)
  2736  		if err != nil {
  2737  			peerLog.Errorf("unable to force close "+
  2738  				"link(%v): %v", failure.shortChanID, err)
  2739  		} else {
  2740  			peerLog.Infof("channel(%v) force "+
  2741  				"closed with txid %v",
  2742  				failure.shortChanID, closeTx.TxHash())
  2743  		}
  2744  	}
  2745  
  2746  	// If this is a permanent failure, we will mark the channel borked.
  2747  	if failure.linkErr.PermanentFailure && lnChan != nil {
  2748  		peerLog.Warnf("Marking link(%v) borked due to permanent "+
  2749  			"failure", failure.shortChanID)
  2750  
  2751  		if err := lnChan.State().MarkBorked(); err != nil {
  2752  			peerLog.Errorf("Unable to mark channel %v borked: %v",
  2753  				failure.shortChanID, err)
  2754  		}
  2755  	}
  2756  
  2757  	// Send an error to the peer explaining why we failed the channel.
  2758  	if failure.linkErr.ShouldSendToPeer() {
  2759  		// If SendData is set, send it to the peer. If not, we'll use
  2760  		// the standard error messages in the payload. We only include
  2761  		// sendData in the cases where the error data does not contain
  2762  		// sensitive information.
  2763  		data := []byte(failure.linkErr.Error())
  2764  		if failure.linkErr.SendData != nil {
  2765  			data = failure.linkErr.SendData
  2766  		}
  2767  		err := p.SendMessage(true, &lnwire.Error{
  2768  			ChanID: failure.chanID,
  2769  			Data:   data,
  2770  		})
  2771  		if err != nil {
  2772  			peerLog.Errorf("unable to send msg to "+
  2773  				"remote peer: %v", err)
  2774  		}
  2775  	}
  2776  }
  2777  
  2778  // tryLinkShutdown attempts to fetch a target link from the switch, calls
  2779  // ShutdownIfChannelClean to optimistically trigger a link shutdown, and
  2780  // removes the link from the switch. It returns an error if any step failed.
  2781  func (p *Brontide) tryLinkShutdown(cid lnwire.ChannelID) error {
  2782  	// Fetch the appropriate link and call ShutdownIfChannelClean to ensure
  2783  	// no other updates can occur.
  2784  	chanLink := p.fetchLinkFromKeyAndCid(cid)
  2785  
  2786  	// If the link happens to be nil, return ErrChannelNotFound so we can
  2787  	// ignore the close message.
  2788  	if chanLink == nil {
  2789  		return ErrChannelNotFound
  2790  	}
  2791  
  2792  	// Else, the link exists, so attempt to trigger shutdown. If this
  2793  	// fails, we'll send an error message to the remote peer.
  2794  	if err := chanLink.ShutdownIfChannelClean(); err != nil {
  2795  		return err
  2796  	}
  2797  
  2798  	// Next, we remove the link from the switch to shut down all of the
  2799  	// link's goroutines and remove it from the switch's internal maps. We
  2800  	// don't call WipeChannel as the channel must still be in the
  2801  	// activeChannels map to process coop close messages.
  2802  	p.cfg.Switch.RemoveLink(cid)
  2803  
  2804  	return nil
  2805  }
  2806  
  2807  // fetchLinkFromKeyAndCid fetches a link from the switch via the remote's
  2808  // public key and the channel id.
  2809  func (p *Brontide) fetchLinkFromKeyAndCid(
  2810  	cid lnwire.ChannelID) htlcswitch.ChannelUpdateHandler {
  2811  
  2812  	var chanLink htlcswitch.ChannelUpdateHandler
  2813  
  2814  	// We deliberately ignore the error here; if no link matching the
  2815  	// channel ID is found below, we simply return nil.
  2816  	links, _ := p.cfg.Switch.GetLinksByInterface(p.cfg.PubKeyBytes)
  2817  	for _, link := range links {
  2818  		if link.ChanID() == cid {
  2819  			chanLink = link
  2820  			break
  2821  		}
  2822  	}
  2823  
  2824  	return chanLink
  2825  }
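
// Illustrative sketch, not authoritative: a caller that starts from a channel
// outpoint (assumed here as an in-scope *wire.OutPoint named chanPoint) would
// derive the ChannelID first and then resolve the link:
//
//	cid := lnwire.NewChanIDFromOutPoint(chanPoint)
//	if link := p.fetchLinkFromKeyAndCid(cid); link != nil {
//		// The link is registered with the switch and can be asked to
//		// shut down once the channel is clean.
//		_ = link.ShutdownIfChannelClean()
//	}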
  2826  
  2827  // finalizeChanClosure performs the final clean up steps once the cooperative
  2828  // closure transaction has been fully broadcast. The finalized closing state
  2829  // machine should be passed in. Once the transaction has been sufficiently
  2830  // confirmed, the channel will be marked as fully closed within the database,
  2831  // and any clients will be notified of updates to the closing state.
  2832  func (p *Brontide) finalizeChanClosure(chanCloser *chancloser.ChanCloser) {
  2833  	closeReq := chanCloser.CloseRequest()
  2834  
  2835  	// First, we'll clear all indexes related to the channel in question.
  2836  	chanPoint := chanCloser.Channel().ChannelPoint()
  2837  	p.WipeChannel(chanPoint)
  2838  
  2839  	// Next, we'll launch a goroutine which will request to be notified by
  2840  	// the ChainNotifier once the closure transaction obtains a single
  2841  	// confirmation.
  2842  	notifier := p.cfg.ChainNotifier
  2843  
  2844  	// If any error happens during WaitForChanToClose, forward it to
  2845  	// closeReq. If this channel closure is not locally initiated, closeReq
  2846  	// will be nil, so just ignore the error.
  2847  	errChan := make(chan error, 1)
  2848  	if closeReq != nil {
  2849  		errChan = closeReq.Err
  2850  	}
  2851  
  2852  	closingTx, err := chanCloser.ClosingTx()
  2853  	if err != nil {
  2854  		if closeReq != nil {
  2855  			peerLog.Error(err)
  2856  			closeReq.Err <- err
  2857  		}

        		// Bail out early: without a closing transaction there is
        		// nothing to confirm, and using it below would panic.
        		return
  2858  	}
  2859  
  2860  	closingTxid := closingTx.TxHash()
  2861  
  2862  	// If this is a locally requested shutdown, update the caller with a
  2863  	// new event detailing the current pending state of this request.
  2864  	if closeReq != nil {
  2865  		closeReq.Updates <- &PendingUpdate{
  2866  			Txid: closingTxid[:],
  2867  		}
  2868  	}
  2869  
  2870  	go WaitForChanToClose(chanCloser.NegotiationHeight(), notifier, errChan,
  2871  		chanPoint, &closingTxid, closingTx.TxOut[0].PkScript, func() {
  2872  
  2873  			// Respond to the local subsystem which requested the
  2874  			// channel closure.
  2875  			if closeReq != nil {
  2876  				closeReq.Updates <- &ChannelCloseUpdate{
  2877  					ClosingTxid: closingTxid[:],
  2878  					Success:     true,
  2879  				}
  2880  			}
  2881  		})
  2882  }
  2883  
  2884  // WaitForChanToClose uses the passed notifier to wait until the closing
  2885  // transaction has been detected as confirmed on chain, and then executes the
  2886  // passed callback. If any error is encountered within the function, it is
  2887  // sent over errChan, when errChan is non-nil.
  2889  func WaitForChanToClose(bestHeight uint32, notifier chainntnfs.ChainNotifier,
  2890  	errChan chan error, chanPoint *wire.OutPoint,
  2891  	closingTxID *chainhash.Hash, closeScript []byte, cb func()) {
  2892  
  2893  	peerLog.Infof("Waiting for confirmation of cooperative close of "+
  2894  		"ChannelPoint(%v) with txid: %v", chanPoint,
  2895  		closingTxID)
  2896  
  2897  	// TODO(roasbeef): add param for num needed confs
  2898  	confNtfn, err := notifier.RegisterConfirmationsNtfn(
  2899  		closingTxID, closeScript, 1, bestHeight,
  2900  	)
  2901  	if err != nil {
  2902  		if errChan != nil {
  2903  			errChan <- err
  2904  		}
  2905  		return
  2906  	}
  2907  
  2908  	// In the case that the ChainNotifier is shutting down, all subscriber
  2909  	// notification channels will be closed, generating a nil receive.
  2910  	height, ok := <-confNtfn.Confirmed
  2911  	if !ok {
  2912  		return
  2913  	}
  2914  
  2915  	// The closing transaction has now confirmed, so the channel is fully
  2916  	// closed on chain; log the confirmation height.
  2917  	peerLog.Infof("ChannelPoint(%v) is now closed at "+
  2918  		"height %v", chanPoint, height.BlockHeight)
  2919  
  2920  	// Finally, execute the closure call back to mark the confirmation of
  2921  	// the transaction closing the contract.
  2922  	cb()
  2923  }
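
// Illustrative sketch, assuming bestHeight, notifier, chanPoint, closingTxid
// and closeScript are in scope: WaitForChanToClose is normally launched in its
// own goroutine, as finalizeChanClosure does above, with the callback carrying
// any post-confirmation bookkeeping:
//
//	errChan := make(chan error, 1)
//	go WaitForChanToClose(
//		bestHeight, notifier, errChan, chanPoint, &closingTxid,
//		closeScript, func() {
//			peerLog.Infof("ChannelPoint(%v) close confirmed",
//				chanPoint)
//		},
//	)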
  2924  
  2925  // WipeChannel removes the passed channel point from all indexes associated with
  2926  // the peer and the switch.
  2927  func (p *Brontide) WipeChannel(chanPoint *wire.OutPoint) {
  2928  	chanID := lnwire.NewChanIDFromOutPoint(chanPoint)
  2929  
  2930  	p.activeChanMtx.Lock()
  2931  	delete(p.activeChannels, chanID)
  2932  	p.activeChanMtx.Unlock()
  2933  
  2934  	// Instruct the HtlcSwitch to close this link as the channel is no
  2935  	// longer active.
  2936  	p.cfg.Switch.RemoveLink(chanID)
  2937  }
  2938  
  2939  // handleInitMsg handles the incoming init message, which contains global and
  2940  // local feature vectors. If the vectors are incompatible, an error is returned.
  2941  func (p *Brontide) handleInitMsg(msg *lnwire.Init) error {
  2942  	// First, merge any features from the legacy global features field into
  2943  	// those presented in the local features fields.
  2944  	err := msg.Features.Merge(msg.GlobalFeatures)
  2945  	if err != nil {
  2946  		return fmt.Errorf("unable to merge legacy global features: %v",
  2947  			err)
  2948  	}
  2949  
  2950  	// Then, finalize the remote feature vector providing the flattened
  2951  	// feature bit namespace.
  2952  	p.remoteFeatures = lnwire.NewFeatureVector(
  2953  		msg.Features, lnwire.Features,
  2954  	)
  2955  
  2956  	// Now that we have their features loaded, we'll ensure that they
  2957  	// didn't set any required bits that we don't know of.
  2958  	err = feature.ValidateRequired(p.remoteFeatures)
  2959  	if err != nil {
  2960  		return fmt.Errorf("invalid remote features: %v", err)
  2961  	}
  2962  
  2963  	// Ensure the remote party's feature vector contains all transitive
  2964  	// dependencies. We know ours are correct since they are validated
  2965  	// during the feature manager's instantiation.
  2966  	err = feature.ValidateDeps(p.remoteFeatures)
  2967  	if err != nil {
  2968  		return fmt.Errorf("invalid remote features: %v", err)
  2969  	}
  2970  
  2971  	// Now that we know we understand their requirements, we'll check that
  2972  	// they support everything we deem to be mandatory.
  2973  	if !p.remoteFeatures.HasFeature(lnwire.DataLossProtectRequired) {
  2974  		return fmt.Errorf("data loss protection required")
  2975  	}
  2976  
  2977  	return nil
  2978  }
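
// Illustrative sketch of the same validation steps applied to a hand-built
// remote feature vector; the bits chosen here are examples only:
//
//	raw := lnwire.NewRawFeatureVector(lnwire.DataLossProtectRequired)
//	remote := lnwire.NewFeatureVector(raw, lnwire.Features)
//	if err := feature.ValidateRequired(remote); err != nil {
//		// The remote set an unknown required bit; we would disconnect.
//	}
//	if !remote.HasFeature(lnwire.DataLossProtectRequired) {
//		// Data loss protection is mandatory, so handleInitMsg would
//		// reject this peer.
//	}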
  2979  
  2980  // LocalFeatures returns the set of features that have been advertised by
  2981  // the local node. This allows sub-systems that use this interface to gate their
  2982  // behavior off the set of negotiated feature bits.
  2983  //
  2984  // NOTE: Part of the lnpeer.Peer interface.
  2985  func (p *Brontide) LocalFeatures() *lnwire.FeatureVector {
  2986  	return p.cfg.Features
  2987  }
  2988  
  2989  // RemoteFeatures returns the set of features that have been advertised by
  2990  // the remote node. This allows sub-systems that use this interface to gate
  2991  // their behavior off the set of negotiated feature bits.
  2992  //
  2993  // NOTE: Part of the lnpeer.Peer interface.
  2994  func (p *Brontide) RemoteFeatures() *lnwire.FeatureVector {
  2995  	return p.remoteFeatures
  2996  }
  2997  
  2998  // sendInitMsg sends the Init message to the remote peer. This message contains
  2999  // our currently supported local and global features.
  3000  func (p *Brontide) sendInitMsg(legacyChan bool) error {
  3001  	features := p.cfg.Features.Clone()
  3002  	legacyFeatures := p.cfg.LegacyFeatures.Clone()
  3003  
  3004  	// If we have a legacy channel open with a peer, we downgrade static
  3005  	// remote required to optional in case the peer does not understand the
  3006  	// required feature bit. If we do not do this, the peer will reject our
  3007  	// connection because it does not understand a required feature bit, and
  3008  	// our channel will be unusable.
  3009  	if legacyChan && features.RequiresFeature(lnwire.StaticRemoteKeyRequired) {
  3010  		peerLog.Infof("Legacy channel open with peer: %x, "+
  3011  			"downgrading static remote required feature bit to "+
  3012  			"optional", p.PubKey())
  3013  
  3014  		// Unset and set in both the local and global features to
  3015  		// ensure both sets are consistent and mergeable by old and
  3016  		// new nodes.
  3017  		features.Unset(lnwire.StaticRemoteKeyRequired)
  3018  		legacyFeatures.Unset(lnwire.StaticRemoteKeyRequired)
  3019  
  3020  		features.Set(lnwire.StaticRemoteKeyOptional)
  3021  		legacyFeatures.Set(lnwire.StaticRemoteKeyOptional)
  3022  	}
  3023  
  3024  	msg := lnwire.NewInitMessage(
  3025  		legacyFeatures.RawFeatureVector,
  3026  		features.RawFeatureVector,
  3027  	)
  3028  
  3029  	return p.writeMessage(msg)
  3030  }
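
// Illustrative sketch, not part of the negotiation itself: the downgrade above
// operates on clones, so the configured feature vectors are never mutated. The
// flag juggling in isolation looks roughly like:
//
//	features := p.cfg.Features.Clone()
//	if features.RequiresFeature(lnwire.StaticRemoteKeyRequired) {
//		features.Unset(lnwire.StaticRemoteKeyRequired)
//		features.Set(lnwire.StaticRemoteKeyOptional)
//	}
//	msg := lnwire.NewInitMessage(
//		p.cfg.LegacyFeatures.Clone().RawFeatureVector,
//		features.RawFeatureVector,
//	)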
  3031  
  3032  // resendChanSyncMsg will attempt to find a channel sync message for the closed
  3033  // channel and resend it to our peer.
  3034  func (p *Brontide) resendChanSyncMsg(cid lnwire.ChannelID) error {
  3035  	// If we already re-sent the message for this channel, we won't do it
  3036  	// again.
  3037  	if _, ok := p.resentChanSyncMsg[cid]; ok {
  3038  		return nil
  3039  	}
  3040  
  3041  	// Check if we have any channel sync messages stored for this channel.
  3042  	c, err := p.cfg.ChannelDB.FetchClosedChannelForID(cid)
  3043  	if err != nil {
  3044  		return fmt.Errorf("unable to fetch channel sync messages for "+
  3045  			"peer %v: %v", p, err)
  3046  	}
  3047  
  3048  	if c.LastChanSyncMsg == nil {
  3049  		return fmt.Errorf("no chan sync message stored for channel %v",
  3050  			cid)
  3051  	}
  3052  
  3053  	if !c.RemotePub.IsEqual(p.IdentityKey()) {
  3054  		return fmt.Errorf("ignoring channel reestablish from "+
  3055  			"peer=%x", p.IdentityKey().SerializeCompressed())
  3056  	}
  3057  
  3058  	peerLog.Debugf("Re-sending channel sync message for channel %v to "+
  3059  		"peer %v", cid, p)
  3060  
  3061  	if err := p.SendMessage(true, c.LastChanSyncMsg); err != nil {
  3062  		return fmt.Errorf("failed resending channel sync "+
  3063  			"message to peer %v: %v", p, err)
  3064  	}
  3065  
  3066  	peerLog.Debugf("Re-sent channel sync message for channel %v to peer "+
  3067  		"%v", cid, p)
  3068  
  3069  	// Note down that we sent the message, so we won't resend it again for
  3070  	// this connection.
  3071  	p.resentChanSyncMsg[cid] = struct{}{}
  3072  
  3073  	return nil
  3074  }
  3075  
  3076  // SendMessage sends a variadic number of high-priority messages to the remote
  3077  // peer. The first argument denotes if the method should block until the
  3078  // messages have been sent to the remote peer or an error is returned,
  3079  // otherwise it returns immediately after queueing.
  3080  //
  3081  // NOTE: Part of the lnpeer.Peer interface.
  3082  func (p *Brontide) SendMessage(sync bool, msgs ...lnwire.Message) error {
  3083  	return p.sendMessage(sync, true, msgs...)
  3084  }
  3085  
  3086  // SendMessageLazy sends a variadic number of low-priority messages to the
  3087  // remote peer. The first argument denotes if the method should block until
  3088  // the messages have been sent to the remote peer or an error is returned,
  3089  // otherwise it returns immediately after queueing.
  3090  //
  3091  // NOTE: Part of the lnpeer.Peer interface.
  3092  func (p *Brontide) SendMessageLazy(sync bool, msgs ...lnwire.Message) error {
  3093  	return p.sendMessage(sync, false, msgs...)
  3094  }
  3095  
  3096  // sendMessage queues a variadic number of messages using the passed priority
  3097  // to the remote peer. If sync is true, this method will block until the
  3098  // messages have been sent to the remote peer or an error is returned, otherwise
  3099  // it returns immediately after queueing.
  3100  func (p *Brontide) sendMessage(sync, priority bool, msgs ...lnwire.Message) error {
  3101  	// Add all incoming messages to the outgoing queue. If the caller
  3102  	// requested a sync send, a list of error chans is populated, one per
  3103  	// message.
  3104  	var errChans []chan error
  3105  	if sync {
  3106  		errChans = make([]chan error, 0, len(msgs))
  3107  	}
  3108  	for _, msg := range msgs {
  3109  		// If a sync send was requested, create an error chan to listen
  3110  		// for an ack from the writeHandler.
  3111  		var errChan chan error
  3112  		if sync {
  3113  			errChan = make(chan error, 1)
  3114  			errChans = append(errChans, errChan)
  3115  		}
  3116  
  3117  		if priority {
  3118  			p.queueMsg(msg, errChan)
  3119  		} else {
  3120  			p.queueMsgLazy(msg, errChan)
  3121  		}
  3122  	}
  3123  
  3124  	// Wait for all replies from the writeHandler. For async sends, this
  3125  	// will be a NOP as the list of error chans is nil.
  3126  	for _, errChan := range errChans {
  3127  		select {
  3128  		case err := <-errChan:
  3129  			return err
  3130  		case <-p.quit:
  3131  			return lnpeer.ErrPeerExiting
  3132  		case <-p.cfg.Quit:
  3133  			return lnpeer.ErrPeerExiting
  3134  		}
  3135  	}
  3136  
  3137  	return nil
  3138  }
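
// Illustrative sketch, assuming msg is an in-scope lnwire.Message: the two
// exported wrappers above differ only in priority and in whether the call
// blocks:
//
//	// Block until the message is handed to the writeHandler (or the peer
//	// shuts down), using the high-priority queue.
//	if err := p.SendMessage(true, msg); err != nil {
//		peerLog.Errorf("unable to send message: %v", err)
//	}
//
//	// Fire-and-forget on the low-priority queue; returns immediately
//	// after queueing.
//	_ = p.SendMessageLazy(false, msg)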
  3139  
  3140  // PubKey returns the pubkey of the peer in compressed serialized format.
  3141  //
  3142  // NOTE: Part of the lnpeer.Peer interface.
  3143  func (p *Brontide) PubKey() [33]byte {
  3144  	return p.cfg.PubKeyBytes
  3145  }
  3146  
  3147  // IdentityKey returns the public key of the remote peer.
  3148  //
  3149  // NOTE: Part of the lnpeer.Peer interface.
  3150  func (p *Brontide) IdentityKey() *secp256k1.PublicKey {
  3151  	return p.cfg.Addr.IdentityKey
  3152  }
  3153  
  3154  // Address returns the network address of the remote peer.
  3155  //
  3156  // NOTE: Part of the lnpeer.Peer interface.
  3157  func (p *Brontide) Address() net.Addr {
  3158  	return p.cfg.Addr.Address
  3159  }
  3160  
  3161  // AddNewChannel adds a new channel to the peer. The channel should fail to be
  3162  // added if the cancel channel is closed.
  3163  //
  3164  // NOTE: Part of the lnpeer.Peer interface.
  3165  func (p *Brontide) AddNewChannel(channel *channeldb.OpenChannel,
  3166  	cancel <-chan struct{}) error {
  3167  
  3168  	errChan := make(chan error, 1)
  3169  	newChanMsg := &newChannelMsg{
  3170  		channel: channel,
  3171  		err:     errChan,
  3172  	}
  3173  
  3174  	select {
  3175  	case p.newChannels <- newChanMsg:
  3176  	case <-cancel:
  3177  		return errors.New("canceled adding new channel")
  3178  	case <-p.quit:
  3179  		return lnpeer.ErrPeerExiting
  3180  	}
  3181  
  3182  	// We pause here to wait for the peer to recognize the new channel
  3183  	// before we close the channel barrier corresponding to the channel.
  3184  	select {
  3185  	case err := <-errChan:
  3186  		return err
  3187  	case <-p.quit:
  3188  		return lnpeer.ErrPeerExiting
  3189  	}
  3190  }
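
// Illustrative sketch, assuming dbChan is an in-scope *channeldb.OpenChannel:
// callers hand the peer the channel together with a cancel channel that aborts
// the hand-off, e.g. when the funding flow is torn down:
//
//	cancel := make(chan struct{})
//	if err := p.AddNewChannel(dbChan, cancel); err != nil {
//		// The peer is exiting or the hand-off was canceled.
//	}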
  3191  
  3192  // StartTime returns the time at which the connection was established if the
  3193  // peer started successfully, and zero otherwise.
  3194  func (p *Brontide) StartTime() time.Time {
  3195  	return p.startTime
  3196  }
  3197  
  3198  // handleCloseMsg is called when a new cooperative channel closure related
  3199  // message is received from the remote peer. We'll use this message to advance
  3200  // the chan closer state machine.
  3201  func (p *Brontide) handleCloseMsg(msg *closeMsg) {
  3202  	// We'll now fetch the matching closing state machine in order to continue,
  3203  	// or finalize the channel closure process.
  3204  	chanCloser, err := p.fetchActiveChanCloser(msg.cid)
  3205  	if err != nil {
  3206  		// If the channel is not known to us, we'll simply ignore this message.
  3207  		if err == ErrChannelNotFound {
  3208  			return
  3209  		}
  3210  
  3211  		peerLog.Errorf("Unable to respond to remote close msg: %v", err)
  3212  
  3213  		errMsg := &lnwire.Error{
  3214  			ChanID: msg.cid,
  3215  			Data:   lnwire.ErrorData(err.Error()),
  3216  		}
  3217  		p.queueMsg(errMsg, nil)
  3218  		return
  3219  	}
  3220  
  3221  	// Next, we'll process the next message using the target state machine.
  3222  	// We'll either continue negotiation, or halt.
  3223  	msgs, closeFin, err := chanCloser.ProcessCloseMsg(
  3224  		msg.msg,
  3225  	)
  3226  	if err != nil {
  3227  		err := fmt.Errorf("unable to process close msg: %v", err)
  3228  		peerLog.Error(err)
  3229  
  3230  		// As the negotiations failed, we'll reset the channel state machine to
  3231  		// ensure we react to on-chain events as normal.
  3232  		chanCloser.Channel().ResetState()
  3233  
  3234  		if chanCloser.CloseRequest() != nil {
  3235  			chanCloser.CloseRequest().Err <- err
  3236  		}
  3237  		delete(p.activeChanCloses, msg.cid)
  3238  		return
  3239  	}
  3240  
  3241  	// Queue any messages to the remote peer that need to be sent as a part of
  3242  	// this latest round of negotiations.
  3243  	for _, msg := range msgs {
  3244  		p.queueMsg(msg, nil)
  3245  	}
  3246  
  3247  	// If we haven't finished close negotiations, then we'll continue as we
  3248  	// can't yet finalize the closure.
  3249  	if !closeFin {
  3250  		return
  3251  	}
  3252  
  3253  	// Otherwise, we've agreed on a closing fee! In this case, we'll wrap up
  3254  	// the channel closure by notifying relevant sub-systems and launching a
  3255  	// goroutine to wait for close tx conf.
  3256  	p.finalizeChanClosure(chanCloser)
  3257  }
  3258  
  3259  // HandleLocalCloseChanReqs accepts a *htlcswitch.ChanClose and passes it on to
  3260  // the channelManager goroutine, which will shut down the link and possibly
  3261  // close the channel.
  3262  func (p *Brontide) HandleLocalCloseChanReqs(req *htlcswitch.ChanClose) {
  3263  	select {
  3264  	case p.localCloseChanReqs <- req:
  3265  		peerLog.Infof("Local close channel request delivered to "+
  3266  			"peer: %x", p.PubKey())
  3267  	case <-p.quit:
  3268  		peerLog.Infof("Unable to deliver local close channel request "+
  3269  			"to peer %x", p.PubKey())
  3270  	}
  3271  }
  3272  
  3273  // NetAddress returns the remote peer's network address as an lnwire.NetAddress.
  3274  func (p *Brontide) NetAddress() *lnwire.NetAddress {
  3275  	return p.cfg.Addr
  3276  }
  3277  
  3278  // Inbound is a getter for the Brontide's Inbound boolean in cfg.
  3279  func (p *Brontide) Inbound() bool {
  3280  	return p.cfg.Inbound
  3281  }
  3282  
  3283  // ConnReq is a getter for the Brontide's connReq in cfg.
  3284  func (p *Brontide) ConnReq() *connmgr.ConnReq {
  3285  	return p.cfg.ConnReq
  3286  }
  3287  
  3288  // ErrorBuffer is a getter for the Brontide's errorBuffer in cfg.
  3289  func (p *Brontide) ErrorBuffer() *queue.CircularBuffer {
  3290  	return p.cfg.ErrorBuffer
  3291  }
  3292  
  3293  // SetAddress sets the remote peer's address given an address.
  3294  func (p *Brontide) SetAddress(address net.Addr) {
  3295  	p.cfg.Addr.Address = address
  3296  }
  3297  
  3298  // ActiveSignal returns the peer's active signal.
  3299  func (p *Brontide) ActiveSignal() chan struct{} {
  3300  	return p.activeSignal
  3301  }
  3302  
  3303  // Conn returns a pointer to the peer's connection struct.
  3304  func (p *Brontide) Conn() net.Conn {
  3305  	return p.cfg.Conn
  3306  }
  3307  
  3308  // BytesReceived returns the number of bytes received from the peer.
  3309  func (p *Brontide) BytesReceived() uint64 {
  3310  	return atomic.LoadUint64(&p.bytesReceived)
  3311  }
  3312  
  3313  // BytesSent returns the number of bytes sent to the peer.
  3314  func (p *Brontide) BytesSent() uint64 {
  3315  	return atomic.LoadUint64(&p.bytesSent)
  3316  }
  3317  
  3318  // LastRemotePingPayload returns the last payload the remote party sent as part
  3319  // of their ping.
  3320  func (p *Brontide) LastRemotePingPayload() []byte {
  3321  	pingPayload := p.lastPingPayload.Load()
  3322  	if pingPayload == nil {
  3323  		return []byte{}
  3324  	}
  3325  
  3326  	pingBytes, ok := pingPayload.(lnwire.PingPayload)
  3327  	if !ok {
  3328  		return nil
  3329  	}
  3330  
  3331  	return pingBytes
  3332  }
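
// Illustrative sketch, assuming payload holds the raw bytes carried by the
// remote ping: the accessor above pairs with a writer that stores the payload
// atomically, roughly:
//
//	p.lastPingPayload.Store(lnwire.PingPayload(payload))
//
// Storing a single concrete type (lnwire.PingPayload) is what keeps the type
// assertion in LastRemotePingPayload safe.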