github.com/decred/dcrlnd@v0.7.6/server.go

     1  package dcrlnd
     2  
     3  import (
     4  	"bytes"
     5  	"context"
     6  	"crypto/rand"
     7  	"encoding/hex"
     8  	"fmt"
     9  	"image/color"
    10  	"math/big"
    11  	prand "math/rand"
    12  	"net"
    13  	"regexp"
    14  	"strconv"
    15  	"strings"
    16  	"sync"
    17  	"sync/atomic"
    18  	"time"
    19  
    20  	"github.com/decred/dcrd/chaincfg/chainhash"
    21  	"github.com/decred/dcrd/connmgr"
    22  	"github.com/decred/dcrd/dcrec/secp256k1/v4"
    23  	"github.com/decred/dcrd/dcrutil/v4"
    24  	"github.com/decred/dcrd/wire"
    25  	"github.com/decred/dcrlnd/autopilot"
    26  	"github.com/decred/dcrlnd/brontide"
    27  	"github.com/decred/dcrlnd/cert"
    28  	"github.com/decred/dcrlnd/chainreg"
    29  	"github.com/decred/dcrlnd/chanacceptor"
    30  	"github.com/decred/dcrlnd/chanbackup"
    31  	"github.com/decred/dcrlnd/chanfitness"
    32  	"github.com/decred/dcrlnd/channeldb"
    33  	"github.com/decred/dcrlnd/channelnotifier"
    34  	"github.com/decred/dcrlnd/clock"
    35  	"github.com/decred/dcrlnd/contractcourt"
    36  	"github.com/decred/dcrlnd/discovery"
    37  	"github.com/decred/dcrlnd/feature"
    38  	"github.com/decred/dcrlnd/funding"
    39  	"github.com/decred/dcrlnd/healthcheck"
    40  	"github.com/decred/dcrlnd/htlcswitch"
    41  	"github.com/decred/dcrlnd/htlcswitch/hop"
    42  	"github.com/decred/dcrlnd/input"
    43  	"github.com/decred/dcrlnd/invoices"
    44  	"github.com/decred/dcrlnd/keychain"
    45  	"github.com/decred/dcrlnd/kvdb"
    46  	"github.com/decred/dcrlnd/lncfg"
    47  	"github.com/decred/dcrlnd/lnpeer"
    48  	"github.com/decred/dcrlnd/lnrpc"
    49  	"github.com/decred/dcrlnd/lnrpc/routerrpc"
    50  	"github.com/decred/dcrlnd/lnwallet"
    51  	"github.com/decred/dcrlnd/lnwallet/chainfee"
    52  	"github.com/decred/dcrlnd/lnwire"
    53  	"github.com/decred/dcrlnd/nat"
    54  	"github.com/decred/dcrlnd/netann"
    55  	"github.com/decred/dcrlnd/peer"
    56  	"github.com/decred/dcrlnd/peernotifier"
    57  	"github.com/decred/dcrlnd/pool"
    58  	"github.com/decred/dcrlnd/queue"
    59  	"github.com/decred/dcrlnd/routing"
    60  	"github.com/decred/dcrlnd/routing/localchans"
    61  	"github.com/decred/dcrlnd/routing/route"
    62  	"github.com/decred/dcrlnd/subscribe"
    63  	"github.com/decred/dcrlnd/sweep"
    64  	"github.com/decred/dcrlnd/ticker"
    65  	"github.com/decred/dcrlnd/tor"
    66  	"github.com/decred/dcrlnd/walletunlocker"
    67  	"github.com/decred/dcrlnd/watchtower/blob"
    68  	"github.com/decred/dcrlnd/watchtower/wtclient"
    69  	"github.com/decred/dcrlnd/watchtower/wtpolicy"
    70  	"github.com/decred/dcrlnd/watchtower/wtserver"
    71  	sphinx "github.com/decred/lightning-onion/v4"
    72  	"github.com/go-errors/errors"
    73  )
    74  
    75  const (
    76  	// defaultMinPeers is the minimum number of peers a node should always
    77  	// be connected to.
    78  	defaultMinPeers = 3
    79  
    80  	// defaultStableConnDuration is a floor under which all reconnection
    81  	// attempts will apply exponential randomized backoff. Connection
    82  	// durations exceeding this value will be eligible to have their
    83  	// backoffs reduced.
    84  	defaultStableConnDuration = 10 * time.Minute
    85  
    86  	// numInstantInitReconnect specifies how many persistent peers we should
    87  	// always attempt outbound connections to immediately. After this value
    88  	// is surpassed, the remaining peers will be randomly delayed using
    89  	// maxInitReconnectDelay.
    90  	numInstantInitReconnect = 10
    91  
    92  	// maxInitReconnectDelay specifies the maximum delay in seconds we will
    93  	// apply in attempting to reconnect to persistent peers on startup. The
    94  	// value used for a particular peer will be chosen between 0s and this
    95  	// value.
    96  	maxInitReconnectDelay = 30
    97  
    98  	// multiAddrConnectionStagger is the delay to wait between connection
    99  	// attempts to a peer with each of its advertised addresses.
   100  	multiAddrConnectionStagger = 10 * time.Second
   101  )
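
// Illustrative sketch (not part of the original file): how the startup
// reconnection constants above interact. The first numInstantInitReconnect
// persistent peers are dialed immediately, while the remainder are delayed
// by a random amount of up to maxInitReconnectDelay seconds. The helper
// name is hypothetical; it only demonstrates the intended policy.
func initReconnectDelaySketch(peerIndex int) time.Duration {
	if peerIndex < numInstantInitReconnect {
		// The first few persistent peers are connected to right away.
		return 0
	}

	// Everyone else gets a randomized delay in [0s, maxInitReconnectDelay).
	delaySecs := prand.Intn(maxInitReconnectDelay)
	return time.Duration(delaySecs) * time.Second
}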
   102  
   103  var (
   104  	// ErrPeerNotConnected signals that the server has no connection to the
   105  	// given peer.
   106  	ErrPeerNotConnected = errors.New("peer is not connected")
   107  
   108  	// ErrServerNotActive indicates that the server has started but hasn't
   109  	// fully finished the startup process.
   110  	ErrServerNotActive = errors.New("server is still in the process of " +
   111  		"starting")
   112  
   113  	// ErrServerShuttingDown indicates that the server is in the process of
   114  	// gracefully exiting.
   115  	ErrServerShuttingDown = errors.New("server is shutting down")
   116  
   117  	// validColorRegexp is a regexp that lets you check if a particular
   118  	// color string matches the standard hex color format #RRGGBB.
   119  	validColorRegexp = regexp.MustCompile("^#[A-Fa-f0-9]{6}$")
   120  
   121  	// MaxFundingAmount is a soft-limit of the maximum channel size
   122  	// currently accepted within the Lightning Protocol. This is
   123  	// defined in BOLT-0002, and serves as an initial precautionary limit
   124  	// while implementations are battle tested in the real world.
   125  	//
   126  	// At the moment, this value depends on which chain is active. It is set
   127  	// to the value under the Bitcoin chain as default.
   128  	//
   129  	// TODO(roasbeef): add command line param to modify
   130  	MaxFundingAmount = funding.MaxFundingAmount
   131  )
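
// Illustrative sketch (not part of the original file): validating a node
// color string against validColorRegexp before using it in a node
// announcement. The helper name is hypothetical; newServer performs the
// actual parsing via parseHexColor(cfg.Color) further below.
func validateColorSketch(colorStr string) error {
	if !validColorRegexp.MatchString(colorStr) {
		return fmt.Errorf("invalid color %q, expected format #RRGGBB",
			colorStr)
	}
	return nil
}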
   132  
   133  // errPeerAlreadyConnected is an error returned by the server when we're
   134  // commanded to connect to a peer, but they're already connected.
   135  type errPeerAlreadyConnected struct {
   136  	peer *peer.Brontide
   137  }
   138  
   139  // Error returns the human readable version of this error type.
   140  //
   141  // NOTE: Part of the error interface.
   142  func (e *errPeerAlreadyConnected) Error() string {
   143  	return fmt.Sprintf("already connected to peer: %v", e.peer)
   144  }
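
// Illustrative sketch (not part of the original file): since
// errPeerAlreadyConnected is a distinct error type, callers within this
// package can detect it with a type assertion and recover the
// already-connected peer from the error value. The helper is hypothetical.
func alreadyConnectedPeerSketch(err error) (*peer.Brontide, bool) {
	if connErr, ok := err.(*errPeerAlreadyConnected); ok {
		return connErr.peer, true
	}
	return nil, false
}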
   145  
   146  // server is the main server of the Lightning Network Daemon. The server houses
   147  // global state pertaining to the wallet, database, and the rpcserver.
   148  // Additionally, the server is also used as a central messaging bus to interact
   149  // with any of its companion objects.
   150  type server struct {
   151  	active   int32 // atomic
   152  	stopping int32 // atomic
   153  
   154  	start sync.Once
   155  	stop  sync.Once
   156  
   157  	cfg *Config
   158  
   159  	// identityECDH is an ECDH capable wrapper for the private key used to
   160  	// authenticate any incoming connections.
   161  	identityECDH keychain.SingleKeyECDH
   162  
   163  	// identityKeyLoc is the key locator for the above wrapped identity key.
   164  	identityKeyLoc keychain.KeyLocator
   165  
   166  	// nodeSigner is an implementation of the MessageSigner interface
   167  	// that's backed by the identity private key of the running lnd node.
   168  	nodeSigner *netann.NodeSigner
   169  
   170  	chanStatusMgr *netann.ChanStatusManager
   171  
   172  	// listenAddrs is the list of addresses the server is currently
   173  	// listening on.
   174  	listenAddrs []net.Addr
   175  
   176  	// torController is a client that will communicate with a locally
   177  	// running Tor server. This client will handle initiating and
   178  	// authenticating the connection to the Tor server, automatically
   179  	// creating and setting up onion services, etc.
   180  	torController *tor.Controller
   181  
   182  	// natTraversal is the specific NAT traversal technique used to
   183  	// automatically set up port forwarding rules in order to advertise to
   184  	// the network that the node is accepting inbound connections.
   185  	natTraversal nat.Traversal
   186  
   187  	// lastDetectedIP is the last IP detected by the NAT traversal technique
   188  	// above. This IP will be watched periodically in a goroutine in order
   189  	// to handle dynamic IP changes.
   190  	lastDetectedIP net.IP
   191  
   192  	mu         sync.RWMutex
   193  	peersByPub map[string]*peer.Brontide
   194  
   195  	inboundPeers  map[string]*peer.Brontide
   196  	outboundPeers map[string]*peer.Brontide
   197  
   198  	peerConnectedListeners    map[string][]chan<- lnpeer.Peer
   199  	peerDisconnectedListeners map[string][]chan<- struct{}
   200  
   201  	// TODO(yy): the Brontide.Start doesn't know this value, which means it
   202  	// will continue to send messages even if there are no active channels
   203  	// and the value below is false. Once it's pruned, all its connections
   204  	// will be closed, thus the Brontide.Start will return an error.
   205  	persistentPeers        map[string]bool
   206  	persistentPeersBackoff map[string]time.Duration
   207  	persistentPeerAddrs    map[string][]*lnwire.NetAddress
   208  	persistentConnReqs     map[string][]*connmgr.ConnReq
   209  	persistentRetryCancels map[string]chan struct{}
   210  
   211  	// peerErrors keeps a set of peer error buffers for peers that have
   212  	// disconnected from us. This allows us to track historic peer errors
   213  	// over connections. The string of the peer's compressed pubkey is used
   214  	// as a key for this map.
   215  	peerErrors map[string]*queue.CircularBuffer
   216  
   217  	// ignorePeerTermination tracks peers for which the server has initiated
   218  	// a disconnect. Adding a peer to this map causes the peer termination
   219  	// watcher to short circuit in the event that peers are purposefully
   220  	// disconnected.
   221  	ignorePeerTermination map[*peer.Brontide]struct{}
   222  
   223  	// scheduledPeerConnection maps a pubkey string to a callback that
   224  	// should be executed in the peerTerminationWatcher once the prior peer
   225  	// with the same pubkey exits. This allows the server to wait until the
   226  	// prior peer has cleaned up successfully, before adding the new peer
   227  	// intended to replace it.
   228  	scheduledPeerConnection map[string]func()
   229  
   230  	// pongBuf is a shared pong reply buffer we'll use across all active
   231  	// peer goroutines. We know the max size of a pong message
   232  	// (lnwire.MaxPongBytes), so we can allocate this ahead of time, and
   233  	// avoid allocations each time we need to send a pong message.
   234  	pongBuf []byte
   235  
   236  	cc *chainreg.ChainControl
   237  
   238  	fundingMgr *funding.Manager
   239  
   240  	graphDB *channeldb.ChannelGraph
   241  
   242  	chanStateDB *channeldb.ChannelStateDB
   243  
   244  	addrSource chanbackup.AddressSource
   245  
   246  	// miscDB is the DB that contains all "other" databases within the main
   247  	// channel DB that haven't been separated out yet.
   248  	miscDB *channeldb.DB
   249  
   250  	htlcSwitch *htlcswitch.Switch
   251  
   252  	interceptableSwitch *htlcswitch.InterceptableSwitch
   253  
   254  	invoices *invoices.InvoiceRegistry
   255  
   256  	channelNotifier *channelnotifier.ChannelNotifier
   257  
   258  	peerNotifier *peernotifier.PeerNotifier
   259  
   260  	htlcNotifier *htlcswitch.HtlcNotifier
   261  
   262  	witnessBeacon contractcourt.WitnessBeacon
   263  
   264  	breachArbiter *contractcourt.BreachArbiter
   265  
   266  	missionControl *routing.MissionControl
   267  
   268  	chanRouter *routing.ChannelRouter
   269  
   270  	controlTower routing.ControlTower
   271  
   272  	authGossiper *discovery.AuthenticatedGossiper
   273  
   274  	localChanMgr *localchans.Manager
   275  
   276  	utxoNursery *contractcourt.UtxoNursery
   277  
   278  	sweeper *sweep.UtxoSweeper
   279  
   280  	chainArb *contractcourt.ChainArbitrator
   281  
   282  	sphinx *hop.OnionProcessor
   283  
   284  	towerClient wtclient.Client
   285  
   286  	anchorTowerClient wtclient.Client
   287  
   288  	connMgr *connmgr.ConnManager
   289  
   290  	sigPool *lnwallet.SigPool
   291  
   292  	writePool *pool.Write
   293  
   294  	readPool *pool.Read
   295  
   296  	// featureMgr dispatches feature vectors for various contexts within the
   297  	// daemon.
   298  	featureMgr *feature.Manager
   299  
   300  	// currentNodeAnn is the node announcement that has been broadcast to
   301  	// the network upon startup, if the attributes of the node (us) have
   302  	// changed since the last start.
   303  	currentNodeAnn *lnwire.NodeAnnouncement
   304  
   305  	// chansToRestore is the set of channels that upon starting, the server
   306  	// should attempt to restore/recover.
   307  	chansToRestore walletunlocker.ChannelsToRecover
   308  
   309  	// chanSubSwapper is a sub-system that will ensure our on-disk channel
   310  	// backups are consistent at all times. It interacts with the
   311  	// channelNotifier to be notified of newly opened and closed channels.
   312  	chanSubSwapper *chanbackup.SubSwapper
   313  
   314  	// chanEventStore tracks the behaviour of channels and their remote peers to
   315  	// provide insights into their health and performance.
   316  	chanEventStore *chanfitness.ChannelEventStore
   317  
   318  	hostAnn *netann.HostAnnouncer
   319  
   320  	// livelinessMonitor monitors that lnd has access to critical resources.
   321  	livelinessMonitor *healthcheck.Monitor
   322  
   323  	customMessageServer *subscribe.Server
   324  
   325  	quit chan struct{}
   326  
   327  	wg sync.WaitGroup
   328  }
   329  
   330  // updatePersistentPeerAddrs subscribes to topology changes and stores
   331  // advertised addresses for any NodeAnnouncements from our persistent peers.
   332  func (s *server) updatePersistentPeerAddrs() error {
   333  	graphSub, err := s.chanRouter.SubscribeTopology()
   334  	if err != nil {
   335  		return err
   336  	}
   337  
   338  	s.wg.Add(1)
   339  	go func() {
   340  		defer func() {
   341  			graphSub.Cancel()
   342  			s.wg.Done()
   343  		}()
   344  
   345  		for {
   346  			select {
   347  
   348  			case <-s.quit:
   349  				return
   350  
   351  			case topChange, ok := <-graphSub.TopologyChanges:
   352  				// If the router is shutting down, then we will
   353  				// as well.
   354  				if !ok {
   355  					return
   356  				}
   357  
   358  				for _, update := range topChange.NodeUpdates {
   359  					pubKeyStr := string(
   360  						update.IdentityKey.
   361  							SerializeCompressed(),
   362  					)
   363  
   364  					// We only care about updates from
   365  					// our persistentPeers.
   366  					s.mu.RLock()
   367  					_, ok := s.persistentPeers[pubKeyStr]
   368  					s.mu.RUnlock()
   369  					if !ok {
   370  						continue
   371  					}
   372  
   373  					addrs := make([]*lnwire.NetAddress, 0,
   374  						len(update.Addresses))
   375  
   376  					for _, addr := range update.Addresses {
   377  						addrs = append(addrs,
   378  							&lnwire.NetAddress{
   379  								IdentityKey: update.IdentityKey,
   380  								Address:     addr,
   381  								ChainNet:    s.cfg.ActiveNetParams.Net,
   382  							},
   383  						)
   384  					}
   385  
   386  					s.mu.Lock()
   387  
   388  					// Update the stored addresses for this
   389  					// peer to reflect the new set.
   390  					s.persistentPeerAddrs[pubKeyStr] = addrs
   391  
   392  					// If there are no outstanding
   393  					// connection requests for this peer
   394  					// then our work is done since we are
   395  					// not currently trying to connect to
   396  					// them.
   397  					if len(s.persistentConnReqs[pubKeyStr]) == 0 {
   398  						s.mu.Unlock()
   399  						continue
   400  					}
   401  
   402  					s.mu.Unlock()
   403  
   404  					s.connectToPersistentPeer(pubKeyStr)
   405  				}
   406  			}
   407  		}
   408  	}()
   409  
   410  	return nil
   411  }
   412  
   413  // CustomMessage is a custom message that is received from a peer.
   414  type CustomMessage struct {
   415  	// Peer is the peer pubkey
   416  	Peer [33]byte
   417  
   418  	// Msg is the custom wire message.
   419  	Msg *lnwire.Custom
   420  }
   421  
   422  // parseAddr parses an address from its string format to a net.Addr.
   423  func parseAddr(address string, netCfg tor.Net) (net.Addr, error) {
   424  	var (
   425  		host string
   426  		port int
   427  	)
   428  
   429  	// Split the address into its host and port components.
   430  	h, p, err := net.SplitHostPort(address)
   431  	if err != nil {
   432  		// If a port wasn't specified, we'll assume the address only
   433  		// contains the host so we'll use the default port.
   434  		host = address
   435  		port = defaultPeerPort
   436  	} else {
   437  		// Otherwise, we'll note both the host and ports.
   438  		host = h
   439  		portNum, err := strconv.Atoi(p)
   440  		if err != nil {
   441  			return nil, err
   442  		}
   443  		port = portNum
   444  	}
   445  
   446  	if tor.IsOnionHost(host) {
   447  		return &tor.OnionAddr{OnionService: host, Port: port}, nil
   448  	}
   449  
   450  	// If the host is part of a TCP address, we'll use the network
   451  	// specific ResolveTCPAddr function in order to resolve these
   452  	// addresses over Tor and prevent leaking your real IP
   453  	// address.
   454  	hostPort := net.JoinHostPort(host, strconv.Itoa(port))
   455  	return netCfg.ResolveTCPAddr("tcp", hostPort)
   456  }
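
// Illustrative sketch (not part of the original file): parseAddr accepts
// both clearnet "host:port" strings and bare onion hosts, falling back to
// defaultPeerPort when no port is given. Using tor.ClearNet here is an
// assumption for the example; in practice callers pass cfg.net.
func parseAddrSketch() {
	netCfg := &tor.ClearNet{}

	// A host:port pair resolves to a regular TCP address.
	tcpAddr, err := parseAddr("127.0.0.1:9735", netCfg)

	// A bare onion host gets defaultPeerPort attached and is returned as
	// a *tor.OnionAddr without any DNS resolution.
	onionAddr, err2 := parseAddr("3g2upl4pq6kufc4m.onion", netCfg)

	_, _ = tcpAddr, onionAddr
	_, _ = err, err2
}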
   457  
   458  // noiseDial is a factory function which creates a connmgr compliant dialing
   459  // function by returning a closure which includes the server's identity key.
   460  func noiseDial(idKey keychain.SingleKeyECDH,
   461  	netCfg tor.Net, timeout time.Duration) func(net.Addr) (net.Conn, error) {
   462  
   463  	return func(a net.Addr) (net.Conn, error) {
   464  		lnAddr := a.(*lnwire.NetAddress)
   465  		return brontide.Dial(idKey, lnAddr, timeout, netCfg.Dial)
   466  	}
   467  }
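
// Illustrative sketch (not part of the original file): the closure returned
// by noiseDial matches the dialer signature the connection manager expects,
// and can also be invoked directly against an lnwire.NetAddress to open a
// brontide (noise-encrypted) connection. The timeout value is arbitrary.
func noiseDialSketch(idKey keychain.SingleKeyECDH, netCfg tor.Net,
	addr *lnwire.NetAddress) (net.Conn, error) {

	dial := noiseDial(idKey, netCfg, 10*time.Second)

	// The returned connection is authenticated against addr.IdentityKey.
	return dial(addr)
}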
   468  
   469  // newServer creates a new instance of the server which is to listen using the
   470  // passed listener address.
   471  func newServer(cfg *Config, listenAddrs []net.Addr,
   472  	dbs *DatabaseInstances, cc *chainreg.ChainControl,
   473  	nodeKeyDesc *keychain.KeyDescriptor,
   474  	chansToRestore walletunlocker.ChannelsToRecover,
   475  	chanPredicate chanacceptor.ChannelAcceptor,
   476  	torController *tor.Controller) (*server, error) {
   477  
   478  	var (
   479  		err         error
   480  		nodeKeyECDH = keychain.NewPubKeyECDH(*nodeKeyDesc, cc.KeyRing)
   481  
   482  		// We just derived the full descriptor, so we know the public
   483  		// key is set on it.
   484  		nodeKeySigner = keychain.NewPubKeyMessageSigner(
   485  			nodeKeyDesc.PubKey, nodeKeyDesc.KeyLocator, cc.KeyRing,
   486  		)
   487  	)
   488  
   489  	listeners := make([]net.Listener, len(listenAddrs))
   490  	for i, listenAddr := range listenAddrs {
   491  		// Note: though brontide.NewListener uses ResolveTCPAddr, it
   492  		// doesn't need to call the general lndResolveTCP function
   493  		// since we are resolving a local address.
   494  		listeners[i], err = brontide.NewListener(
   495  			nodeKeyECDH, listenAddr.String(),
   496  		)
   497  		if err != nil {
   498  			return nil, err
   499  		}
   500  	}
   501  
   502  	var serializedPubKey [33]byte
   503  	copy(serializedPubKey[:], nodeKeyDesc.PubKey.SerializeCompressed())
   504  
   505  	// Initialize the sphinx router.
   506  	replayLog := htlcswitch.NewDecayedLog(
   507  		dbs.DecayedLogDB, cc.ChainNotifier,
   508  	)
   509  	sphinxRouter := sphinx.NewRouter(
   510  		nodeKeyECDH, cfg.ActiveNetParams.Params, replayLog,
   511  	)
   512  
   513  	writeBufferPool := pool.NewWriteBuffer(
   514  		pool.DefaultWriteBufferGCInterval,
   515  		pool.DefaultWriteBufferExpiryInterval,
   516  	)
   517  
   518  	writePool := pool.NewWrite(
   519  		writeBufferPool, cfg.Workers.Write, pool.DefaultWorkerTimeout,
   520  	)
   521  
   522  	readBufferPool := pool.NewReadBuffer(
   523  		pool.DefaultReadBufferGCInterval,
   524  		pool.DefaultReadBufferExpiryInterval,
   525  	)
   526  
   527  	readPool := pool.NewRead(
   528  		readBufferPool, cfg.Workers.Read, pool.DefaultWorkerTimeout,
   529  	)
   530  
   531  	featureMgr, err := feature.NewManager(feature.Config{
   532  		NoTLVOnion:               cfg.ProtocolOptions.LegacyOnion(),
   533  		NoStaticRemoteKey:        cfg.ProtocolOptions.NoStaticRemoteKey(),
   534  		NoAnchors:                cfg.ProtocolOptions.NoAnchorCommitments(),
   535  		NoWumbo:                  !cfg.ProtocolOptions.Wumbo(),
   536  		NoScriptEnforcementLease: !cfg.ProtocolOptions.ScriptEnforcementLease(),
   537  	})
   538  	if err != nil {
   539  		return nil, err
   540  	}
   541  
   542  	registryConfig := invoices.RegistryConfig{
   543  		FinalCltvRejectDelta:        lncfg.DefaultFinalCltvRejectDelta,
   544  		HtlcHoldDuration:            invoices.DefaultHtlcHoldDuration,
   545  		Clock:                       clock.NewDefaultClock(),
   546  		AcceptKeySend:               cfg.AcceptKeySend,
   547  		AcceptAMP:                   cfg.AcceptAMP,
   548  		GcCanceledInvoicesOnStartup: cfg.GcCanceledInvoicesOnStartup,
   549  		GcCanceledInvoicesOnTheFly:  cfg.GcCanceledInvoicesOnTheFly,
   550  		KeysendHoldTime:             cfg.KeysendHoldTime,
   551  	}
   552  
   553  	s := &server{
   554  		cfg:            cfg,
   555  		graphDB:        dbs.GraphDB.ChannelGraph(),
   556  		chanStateDB:    dbs.ChanStateDB.ChannelStateDB(),
   557  		addrSource:     dbs.ChanStateDB,
   558  		miscDB:         dbs.ChanStateDB,
   559  		cc:             cc,
   560  		sigPool:        lnwallet.NewSigPool(cfg.Workers.Sig, cc.Signer),
   561  		writePool:      writePool,
   562  		readPool:       readPool,
   563  		chansToRestore: chansToRestore,
   564  
   565  		channelNotifier: channelnotifier.New(
   566  			dbs.ChanStateDB.ChannelStateDB(),
   567  		),
   568  
   569  		identityECDH:   nodeKeyECDH,
   570  		identityKeyLoc: nodeKeyDesc.KeyLocator,
   571  		nodeSigner:     netann.NewNodeSigner(nodeKeySigner),
   572  
   573  		listenAddrs: listenAddrs,
   574  
   575  		// TODO(roasbeef): derive proper onion key based on rotation
   576  		// schedule
   577  		sphinx: hop.NewOnionProcessor(sphinxRouter),
   578  
   579  		torController: torController,
   580  
   581  		persistentPeers:         make(map[string]bool),
   582  		persistentPeersBackoff:  make(map[string]time.Duration),
   583  		persistentConnReqs:      make(map[string][]*connmgr.ConnReq),
   584  		persistentPeerAddrs:     make(map[string][]*lnwire.NetAddress),
   585  		persistentRetryCancels:  make(map[string]chan struct{}),
   586  		peerErrors:              make(map[string]*queue.CircularBuffer),
   587  		ignorePeerTermination:   make(map[*peer.Brontide]struct{}),
   588  		scheduledPeerConnection: make(map[string]func()),
   589  		pongBuf:                 make([]byte, lnwire.MaxPongBytes),
   590  
   591  		peersByPub:                make(map[string]*peer.Brontide),
   592  		inboundPeers:              make(map[string]*peer.Brontide),
   593  		outboundPeers:             make(map[string]*peer.Brontide),
   594  		peerConnectedListeners:    make(map[string][]chan<- lnpeer.Peer),
   595  		peerDisconnectedListeners: make(map[string][]chan<- struct{}),
   596  
   597  		customMessageServer: subscribe.NewServer(),
   598  
   599  		featureMgr: featureMgr,
   600  		quit:       make(chan struct{}),
   601  	}
   602  
   603  	s.witnessBeacon = &preimageBeacon{
   604  		wCache:      dbs.ChanStateDB.NewWitnessCache(),
   605  		subscribers: make(map[uint64]*preimageSubscriber),
   606  	}
   607  
   608  	currentHash, currentHeight, err := s.cc.ChainIO.GetBestBlock()
   609  	if err != nil {
   610  		return nil, err
   611  	}
   612  
   613  	expiryWatcher := invoices.NewInvoiceExpiryWatcher(
   614  		clock.NewDefaultClock(), cfg.Invoices.HoldExpiryDelta,
   615  		uint32(currentHeight), currentHash, cc.ChainNotifier,
   616  	)
   617  	s.invoices = invoices.NewRegistry(
   618  		dbs.ChanStateDB, expiryWatcher, &registryConfig,
   619  	)
   620  
   621  	s.htlcNotifier = htlcswitch.NewHtlcNotifier(time.Now)
   622  
   623  	thresholdSats := dcrutil.Amount(cfg.DustThreshold)
   624  	thresholdMSats := lnwire.NewMAtomsFromAtoms(thresholdSats)
   625  
   626  	s.htlcSwitch, err = htlcswitch.New(htlcswitch.Config{
   627  		DB:                   dbs.ChanStateDB,
   628  		FetchAllOpenChannels: s.chanStateDB.FetchAllOpenChannels,
   629  		FetchClosedChannels:  s.chanStateDB.FetchClosedChannels,
   630  		LocalChannelClose: func(pubKey []byte,
   631  			request *htlcswitch.ChanClose) {
   632  
   633  			peer, err := s.FindPeerByPubStr(string(pubKey))
   634  			if err != nil {
   635  				srvrLog.Errorf("unable to close channel, peer"+
   636  					" with %v id can't be found: %v",
   637  					pubKey, err,
   638  				)
   639  				return
   640  			}
   641  
   642  			peer.HandleLocalCloseChanReqs(request)
   643  		},
   644  		FwdingLog:              dbs.ChanStateDB.ForwardingLog(),
   645  		SwitchPackager:         channeldb.NewSwitchPackager(),
   646  		ExtractErrorEncrypter:  s.sphinx.ExtractErrorEncrypter,
   647  		FetchLastChannelUpdate: s.fetchLastChanUpdate(),
   648  		Notifier:               s.cc.ChainNotifier,
   649  		HtlcNotifier:           s.htlcNotifier,
   650  		FwdEventTicker:         ticker.New(htlcswitch.DefaultFwdEventInterval),
   651  		LogEventTicker:         ticker.New(htlcswitch.DefaultLogInterval),
   652  		AckEventTicker:         ticker.New(htlcswitch.DefaultAckInterval),
   653  		AllowCircularRoute:     cfg.AllowCircularRoute,
   654  		RejectHTLC:             cfg.RejectHTLC,
   655  		Clock:                  clock.NewDefaultClock(),
   656  		HTLCExpiry:             htlcswitch.DefaultHTLCExpiry,
   657  		DustThreshold:          thresholdMSats,
   658  	}, uint32(currentHeight))
   659  	if err != nil {
   660  		return nil, err
   661  	}
   662  	s.interceptableSwitch = htlcswitch.NewInterceptableSwitch(s.htlcSwitch)
   663  
   664  	chanStatusMgrCfg := &netann.ChanStatusConfig{
   665  		ChanStatusSampleInterval: cfg.ChanStatusSampleInterval,
   666  		ChanEnableTimeout:        cfg.ChanEnableTimeout,
   667  		ChanDisableTimeout:       cfg.ChanDisableTimeout,
   668  		OurPubKey:                nodeKeyDesc.PubKey,
   669  		OurKeyLoc:                nodeKeyDesc.KeyLocator,
   670  		MessageSigner:            s.nodeSigner,
   671  		IsChannelActive:          s.htlcSwitch.HasActiveLink,
   672  		ApplyChannelUpdate:       s.applyChannelUpdate,
   673  		DB:                       s.chanStateDB,
   674  		Graph:                    dbs.GraphDB.ChannelGraph(),
   675  	}
   676  
   677  	chanStatusMgr, err := netann.NewChanStatusManager(chanStatusMgrCfg)
   678  	if err != nil {
   679  		return nil, err
   680  	}
   681  	s.chanStatusMgr = chanStatusMgr
   682  
   683  	// If enabled, use either UPnP or NAT-PMP to automatically configure
   684  	// port forwarding for users behind a NAT.
   685  	if cfg.NAT {
   686  		srvrLog.Info("Scanning local network for a UPnP enabled device")
   687  
   688  		discoveryTimeout := 10 * time.Second
   689  
   690  		ctx, cancel := context.WithTimeout(
   691  			context.Background(), discoveryTimeout,
   692  		)
   693  		defer cancel()
   694  		upnp, err := nat.DiscoverUPnP(ctx)
   695  		if err == nil {
   696  			s.natTraversal = upnp
   697  		} else {
   698  			// If we were not able to discover a UPnP enabled device
   699  			// on the local network, we'll fall back to attempting
   700  			// to discover a NAT-PMP enabled device.
   701  			srvrLog.Errorf("unable to discover a UPnP enabled "+
   702  				"device on the local network: %v", err)
   703  
   704  			srvrLog.Info("Scanning local network for a NAT-PMP " +
   705  				"enabled device")
   706  
   707  			pmp, err := nat.DiscoverPMP(discoveryTimeout)
   708  			if err != nil {
   709  				err := fmt.Errorf("unable to discover a "+
   710  					"NAT-PMP enabled device on the local "+
   711  					"network: %v", err)
   712  				srvrLog.Error(err)
   713  				return nil, err
   714  			}
   715  
   716  			s.natTraversal = pmp
   717  		}
   718  	}
   719  
   720  	// If we were requested to automatically configure port forwarding,
   721  	// we'll use the ports that the server will be listening on.
   722  	externalIPStrings := make([]string, len(cfg.ExternalIPs))
   723  	for idx, ip := range cfg.ExternalIPs {
   724  		externalIPStrings[idx] = ip.String()
   725  	}
   726  	if s.natTraversal != nil {
   727  		listenPorts := make([]uint16, 0, len(listenAddrs))
   728  		for _, listenAddr := range listenAddrs {
   729  			// At this point, the listen addresses should have
   730  			// already been normalized, so it's safe to ignore the
   731  			// errors.
   732  			_, portStr, _ := net.SplitHostPort(listenAddr.String())
   733  			port, _ := strconv.Atoi(portStr)
   734  
   735  			listenPorts = append(listenPorts, uint16(port))
   736  		}
   737  
   738  		ips, err := s.configurePortForwarding(listenPorts...)
   739  		if err != nil {
   740  			srvrLog.Errorf("Unable to automatically set up port "+
   741  				"forwarding using %s: %v",
   742  				s.natTraversal.Name(), err)
   743  		} else {
   744  			srvrLog.Infof("Automatically set up port forwarding "+
   745  				"using %s to advertise external IP",
   746  				s.natTraversal.Name())
   747  			externalIPStrings = append(externalIPStrings, ips...)
   748  		}
   749  	}
   750  
   751  	// If external IP addresses have been specified, add those to the list
   752  	// of this server's addresses.
   753  	externalIPs, err := lncfg.NormalizeAddresses(
   754  		externalIPStrings, strconv.Itoa(defaultPeerPort),
   755  		cfg.net.ResolveTCPAddr,
   756  	)
   757  	if err != nil {
   758  		return nil, err
   759  	}
   760  	selfAddrs := make([]net.Addr, 0, len(externalIPs))
   761  	selfAddrs = append(selfAddrs, externalIPs...)
   762  
   763  	// As the graph can be obtained at any time from the network, we won't
   764  	// replicate it, and instead it'll only be stored locally.
   765  	chanGraph := dbs.GraphDB.ChannelGraph()
   766  
   767  	// We'll now reconstruct a node announcement based on our current
   768  	// configuration so we can send it out as a sort of heartbeat within
   769  	// the network.
   770  	//
   771  	// We'll start by parsing the node color from configuration.
   772  	color, err := parseHexColor(cfg.Color)
   773  	if err != nil {
   774  		srvrLog.Errorf("unable to parse color: %v\n", err)
   775  		return nil, err
   776  	}
   777  
   778  	// If no alias is provided, default to first 10 characters of public
   779  	// key.
   780  	alias := cfg.Alias
   781  	if alias == "" {
   782  		alias = hex.EncodeToString(serializedPubKey[:10])
   783  	}
   784  	nodeAlias, err := lnwire.NewNodeAlias(alias)
   785  	if err != nil {
   786  		return nil, err
   787  	}
   788  	selfNode := &channeldb.LightningNode{
   789  		HaveNodeAnnouncement: true,
   790  		LastUpdate:           time.Now(),
   791  		Addresses:            selfAddrs,
   792  		Alias:                nodeAlias.String(),
   793  		Features:             s.featureMgr.Get(feature.SetNodeAnn),
   794  		Color:                color,
   795  	}
   796  	copy(selfNode.PubKeyBytes[:], nodeKeyDesc.PubKey.SerializeCompressed())
   797  
   798  	// Based on the disk representation of the node announcement generated
   799  	// above, we'll generate a node announcement that can go out on the
   800  	// network so we can properly sign it.
   801  	nodeAnn, err := selfNode.NodeAnnouncement(false)
   802  	if err != nil {
   803  		return nil, fmt.Errorf("unable to gen self node ann: %v", err)
   804  	}
   805  
   806  	// With the announcement generated, we'll sign it to properly
   807  	// authenticate the message on the network.
   808  	authSig, err := netann.SignAnnouncement(
   809  		s.nodeSigner, nodeKeyDesc.KeyLocator, nodeAnn,
   810  	)
   811  	if err != nil {
   812  		return nil, fmt.Errorf("unable to generate signature for "+
   813  			"self node announcement: %v", err)
   814  	}
   815  	selfNode.AuthSigBytes = authSig.Serialize()
   816  	nodeAnn.Signature, err = lnwire.NewSigFromRawSignature(
   817  		selfNode.AuthSigBytes,
   818  	)
   819  	if err != nil {
   820  		return nil, err
   821  	}
   822  
   823  	// Finally, we'll update the representation on disk, and update our
   824  	// cached in-memory version as well.
   825  	if err := chanGraph.SetSourceNode(selfNode); err != nil {
   826  		return nil, fmt.Errorf("can't set self node: %v", err)
   827  	}
   828  	s.currentNodeAnn = nodeAnn
   829  
   830  	// The router will get access to the payment ID sequencer, such that it
   831  	// can generate unique payment IDs.
   832  	sequencer, err := htlcswitch.NewPersistentSequencer(dbs.ChanStateDB)
   833  	if err != nil {
   834  		return nil, err
   835  	}
   836  
   837  	// Instantiate mission control with config from the sub server.
   838  	//
   839  	// TODO(joostjager): When we are further in the process of moving to sub
   840  	// servers, the mission control instance itself can be moved there too.
   841  	routingConfig := routerrpc.GetRoutingConfig(cfg.SubRPCServers.RouterRPC)
   842  
   843  	estimatorCfg := routing.ProbabilityEstimatorCfg{
   844  		AprioriHopProbability: routingConfig.AprioriHopProbability,
   845  		PenaltyHalfLife:       routingConfig.PenaltyHalfLife,
   846  		AprioriWeight:         routingConfig.AprioriWeight,
   847  	}
   848  
   849  	s.missionControl, err = routing.NewMissionControl(
   850  		dbs.ChanStateDB, selfNode.PubKeyBytes,
   851  		&routing.MissionControlConfig{
   852  			ProbabilityEstimatorCfg: estimatorCfg,
   853  			MaxMcHistory:            routingConfig.MaxMcHistory,
   854  			McFlushInterval:         routingConfig.McFlushInterval,
   855  			MinFailureRelaxInterval: routing.DefaultMinFailureRelaxInterval,
   856  		},
   857  	)
   858  	if err != nil {
   859  		return nil, fmt.Errorf("can't create mission control: %v", err)
   860  	}
   861  
   862  	srvrLog.Debugf("Instantiating payment session source with config: "+
   863  		"AttemptCost=%v + %v%%, MinRouteProbability=%v",
   864  		int64(routingConfig.AttemptCost),
   865  		float64(routingConfig.AttemptCostPPM)/10000,
   866  		routingConfig.MinRouteProbability)
   867  
   868  	pathFindingConfig := routing.PathFindingConfig{
   869  		AttemptCost: lnwire.NewMAtomsFromAtoms(
   870  			routingConfig.AttemptCost,
   871  		),
   872  		AttemptCostPPM: routingConfig.AttemptCostPPM,
   873  		MinProbability: routingConfig.MinRouteProbability,
   874  	}
   875  
   876  	sourceNode, err := chanGraph.SourceNode()
   877  	if err != nil {
   878  		return nil, fmt.Errorf("error getting source node: %v", err)
   879  	}
   880  	paymentSessionSource := &routing.SessionSource{
   881  		Graph:             chanGraph,
   882  		SourceNode:        sourceNode,
   883  		MissionControl:    s.missionControl,
   884  		GetLink:           s.htlcSwitch.GetLinkByShortID,
   885  		PathFindingConfig: pathFindingConfig,
   886  	}
   887  
   888  	paymentControl := channeldb.NewPaymentControl(dbs.ChanStateDB)
   889  
   890  	s.controlTower = routing.NewControlTower(paymentControl)
   891  
   892  	strictPruning := (cfg.Dcrwallet.SPV ||
   893  		cfg.Routing.StrictZombiePruning)
   894  	s.chanRouter, err = routing.New(routing.Config{
   895  		Graph:               chanGraph,
   896  		Chain:               cc.ChainIO,
   897  		ChainView:           cc.ChainView,
   898  		Notifier:            cc.ChainNotifier,
   899  		Payer:               s.htlcSwitch,
   900  		Control:             s.controlTower,
   901  		MissionControl:      s.missionControl,
   902  		SessionSource:       paymentSessionSource,
   903  		ChannelPruneExpiry:  routing.DefaultChannelPruneExpiry,
   904  		GraphPruneInterval:  time.Hour,
   905  		FirstTimePruneDelay: routing.DefaultFirstTimePruneDelay,
   906  		GetLink:             s.htlcSwitch.GetLinkByShortID,
   907  		AssumeChannelValid:  cfg.Routing.AssumeChannelValid,
   908  		NextPaymentID:       sequencer.NextID,
   909  		PathFindingConfig:   pathFindingConfig,
   910  		LocalOpenChanIDs:    dbs.ChanStateDB.LocalOpenChanIDs,
   911  		Clock:               clock.NewDefaultClock(),
   912  		StrictZombiePruning: strictPruning,
   913  	})
   914  	if err != nil {
   915  		return nil, fmt.Errorf("can't create router: %v", err)
   916  	}
   917  
   918  	chanSeries := discovery.NewChanSeries(s.graphDB)
   919  	gossipMessageStore, err := discovery.NewMessageStore(dbs.ChanStateDB)
   920  	if err != nil {
   921  		return nil, err
   922  	}
   923  	waitingProofStore, err := channeldb.NewWaitingProofStore(dbs.ChanStateDB)
   924  	if err != nil {
   925  		return nil, err
   926  	}
   927  
   928  	s.authGossiper = discovery.New(discovery.Config{
   929  		Router:            s.chanRouter,
   930  		Notifier:          s.cc.ChainNotifier,
   931  		ChainHash:         s.cfg.ActiveNetParams.GenesisHash,
   932  		Broadcast:         s.BroadcastMessage,
   933  		ChanSeries:        chanSeries,
   934  		GossiperState:     discovery.NewGossiperState(dbs.ChanStateDB),
   935  		NotifyWhenOnline:  s.NotifyWhenOnline,
   936  		NotifyWhenOffline: s.NotifyWhenOffline,
   937  		SelfNodeAnnouncement: func(refresh bool) (lnwire.NodeAnnouncement, error) {
   938  			return s.genNodeAnnouncement(refresh)
   939  		},
   940  		ProofMatureDelta:        0,
   941  		TrickleDelay:            time.Millisecond * time.Duration(cfg.TrickleDelay),
   942  		RetransmitTicker:        ticker.New(time.Minute * 30),
   943  		RebroadcastInterval:     time.Hour * 24,
   944  		WaitingProofStore:       waitingProofStore,
   945  		MessageStore:            gossipMessageStore,
   946  		AnnSigner:               s.nodeSigner,
   947  		RotateTicker:            ticker.New(discovery.DefaultSyncerRotationInterval),
   948  		HistoricalSyncTicker:    ticker.New(cfg.HistoricalSyncInterval),
   949  		NumActiveSyncers:        cfg.NumGraphSyncPeers,
   950  		MinimumBatchSize:        10,
   951  		SubBatchDelay:           time.Second * 5,
   952  		IgnoreHistoricalFilters: cfg.IgnoreHistoricalGossipFilters,
   953  		PinnedSyncers:           cfg.Gossip.PinnedSyncers,
   954  		MaxChannelUpdateBurst:   cfg.Gossip.MaxChannelUpdateBurst,
   955  		ChannelUpdateInterval:   cfg.Gossip.ChannelUpdateInterval,
   956  	}, nodeKeyDesc)
   957  
   958  	s.localChanMgr = &localchans.Manager{
   959  		ForAllOutgoingChannels:    s.chanRouter.ForAllOutgoingChannels,
   960  		PropagateChanPolicyUpdate: s.authGossiper.PropagateChanPolicyUpdate,
   961  		UpdateForwardingPolicies:  s.htlcSwitch.UpdateForwardingPolicies,
   962  		FetchChannel:              s.chanStateDB.FetchChannel,
   963  	}
   964  
   965  	utxnStore, err := contractcourt.NewNurseryStore(
   966  		&s.cfg.ActiveNetParams.GenesisHash, dbs.ChanStateDB,
   967  	)
   968  	if err != nil {
   969  		srvrLog.Errorf("unable to create nursery store: %v", err)
   970  		return nil, err
   971  	}
   972  
   973  	srvrLog.Tracef("Sweeper batch window duration: %v",
   974  		sweep.DefaultBatchWindowDuration)
   975  
   976  	sweeperStore, err := sweep.NewSweeperStore(
   977  		dbs.ChanStateDB, &s.cfg.ActiveNetParams.GenesisHash,
   978  	)
   979  	if err != nil {
   980  		srvrLog.Errorf("unable to create sweeper store: %v", err)
   981  		return nil, err
   982  	}
   983  
   984  	s.sweeper = sweep.New(&sweep.UtxoSweeperConfig{
   985  		FeeEstimator:   cc.FeeEstimator,
   986  		GenSweepScript: newSweepPkScriptGen(cc.Wallet),
   987  		Signer:         cc.Wallet.Cfg.Signer,
   988  		Wallet:         cc.Wallet,
   989  		NewBatchTimer: func() <-chan time.Time {
   990  			return time.NewTimer(sweep.DefaultBatchWindowDuration).C
   991  		},
   992  		Notifier:             cc.ChainNotifier,
   993  		Store:                sweeperStore,
   994  		MaxInputsPerTx:       sweep.DefaultMaxInputsPerTx,
   995  		MaxSweepAttempts:     sweep.DefaultMaxSweepAttempts,
   996  		NextAttemptDeltaFunc: sweep.DefaultNextAttemptDeltaFunc,
   997  		NetParams:            s.cfg.ActiveNetParams.Params,
   998  		MaxFeeRate:           sweep.DefaultMaxFeeRate,
   999  		FeeRateBucketSize:    sweep.DefaultFeeRateBucketSize,
  1000  	})
  1001  
  1002  	s.utxoNursery = contractcourt.NewUtxoNursery(&contractcourt.NurseryConfig{
  1003  		ChainIO:             cc.ChainIO,
  1004  		ConfDepth:           1,
  1005  		FetchClosedChannels: s.chanStateDB.FetchClosedChannels,
  1006  		FetchClosedChannel:  s.chanStateDB.FetchClosedChannel,
  1007  		Notifier:            cc.ChainNotifier,
  1008  		PublishTransaction:  cc.Wallet.PublishTransaction,
  1009  		Store:               utxnStore,
  1010  		SweepInput:          s.sweeper.SweepInput,
  1011  	})
  1012  
  1013  	// Construct a closure that wraps the htlcswitch's CloseLink method.
  1014  	closeLink := func(chanPoint *wire.OutPoint,
  1015  		closureType contractcourt.ChannelCloseType) {
  1016  		// TODO(conner): Properly respect the update and error channels
  1017  		// returned by CloseLink.
  1018  
  1019  		// Instruct the switch to close the channel.  Provide no close out
  1020  		// delivery script or target fee per kw because user input is not
  1021  		// available when the remote peer closes the channel.
  1022  		s.htlcSwitch.CloseLink(chanPoint, closureType, 0, nil)
  1023  	}
  1024  
  1025  	// We will use the following channel to reliably hand off contract
  1026  	// breach events from the ChannelArbitrator to the contractcourt.BreachArbiter.
  1027  	contractBreaches := make(chan *contractcourt.ContractBreachEvent, 1)
  1028  
  1029  	s.breachArbiter = contractcourt.NewBreachArbiter(&contractcourt.BreachConfig{
  1030  		CloseLink:          closeLink,
  1031  		DB:                 s.chanStateDB,
  1032  		Estimator:          s.cc.FeeEstimator,
  1033  		GenSweepScript:     newSweepPkScriptGen(cc.Wallet),
  1034  		Notifier:           cc.ChainNotifier,
  1035  		PublishTransaction: cc.Wallet.PublishTransaction,
  1036  		ContractBreaches:   contractBreaches,
  1037  		Signer:             cc.Wallet.Cfg.Signer,
  1038  		Store: contractcourt.NewRetributionStore(
  1039  			dbs.ChanStateDB,
  1040  		),
  1041  		NetParams: s.cfg.ActiveNetParams.Params,
  1042  	})
  1043  
  1044  	s.chainArb = contractcourt.NewChainArbitrator(contractcourt.ChainArbitratorConfig{
  1045  		ChainHash:              s.cfg.ActiveNetParams.GenesisHash,
  1046  		NetParams:              s.cfg.ActiveNetParams.Params,
  1047  		IncomingBroadcastDelta: lncfg.DefaultIncomingBroadcastDelta,
  1048  		OutgoingBroadcastDelta: lncfg.DefaultOutgoingBroadcastDelta,
  1049  		NewSweepAddr:           newSweepPkScriptGen(cc.Wallet),
  1050  		PublishTx:              cc.Wallet.PublishTransaction,
  1051  		DeliverResolutionMsg: func(msgs ...contractcourt.ResolutionMsg) error {
  1052  			for _, msg := range msgs {
  1053  				err := s.htlcSwitch.ProcessContractResolution(msg)
  1054  				if err != nil {
  1055  					return err
  1056  				}
  1057  			}
  1058  			return nil
  1059  		},
  1060  		IncubateOutputs: func(chanPoint wire.OutPoint,
  1061  			outHtlcRes *lnwallet.OutgoingHtlcResolution,
  1062  			inHtlcRes *lnwallet.IncomingHtlcResolution,
  1063  			broadcastHeight uint32) error {
  1064  
  1065  			var (
  1066  				inRes  []lnwallet.IncomingHtlcResolution
  1067  				outRes []lnwallet.OutgoingHtlcResolution
  1068  			)
  1069  			if inHtlcRes != nil {
  1070  				inRes = append(inRes, *inHtlcRes)
  1071  			}
  1072  			if outHtlcRes != nil {
  1073  				outRes = append(outRes, *outHtlcRes)
  1074  			}
  1075  
  1076  			return s.utxoNursery.IncubateOutputs(
  1077  				chanPoint, outRes, inRes,
  1078  				broadcastHeight,
  1079  			)
  1080  		},
  1081  		PreimageDB:   s.witnessBeacon,
  1082  		Notifier:     cc.ChainNotifier,
  1083  		Signer:       cc.Wallet.Cfg.Signer,
  1084  		FeeEstimator: cc.FeeEstimator,
  1085  		ChainIO:      cc.ChainIO,
  1086  		MarkLinkInactive: func(chanPoint wire.OutPoint) error {
  1087  			chanID := lnwire.NewChanIDFromOutPoint(&chanPoint)
  1088  			s.htlcSwitch.RemoveLink(chanID)
  1089  			return nil
  1090  		},
  1091  		IsOurAddress: cc.Wallet.IsOurAddress,
  1092  		ContractBreach: func(chanPoint wire.OutPoint,
  1093  			breachRet *lnwallet.BreachRetribution) error {
  1094  
  1095  			// processACK will handle the contractcourt.BreachArbiter ACKing the
  1096  			// event.
  1097  			finalErr := make(chan error, 1)
  1098  			processACK := func(brarErr error) {
  1099  				if brarErr != nil {
  1100  					finalErr <- brarErr
  1101  					return
  1102  				}
  1103  
  1104  				// If the breachArbiter successfully handled
  1105  				// the event, we can signal that the handoff
  1106  				// was successful.
  1107  				finalErr <- nil
  1108  			}
  1109  
  1110  			event := &contractcourt.ContractBreachEvent{
  1111  				ChanPoint:         chanPoint,
  1112  				ProcessACK:        processACK,
  1113  				BreachRetribution: breachRet,
  1114  			}
  1115  
  1116  			// Send the contract breach event to the contractcourt.BreachArbiter.
  1117  			select {
  1118  			case contractBreaches <- event:
  1119  			case <-s.quit:
  1120  				return ErrServerShuttingDown
  1121  			}
  1122  
  1123  			// We'll wait for a final error to be available from
  1124  			// the breachArbiter.
  1125  			select {
  1126  			case err := <-finalErr:
  1127  				return err
  1128  			case <-s.quit:
  1129  				return ErrServerShuttingDown
  1130  			}
  1131  		},
  1132  		DisableChannel: func(chanPoint wire.OutPoint) error {
  1133  			return s.chanStatusMgr.RequestDisable(chanPoint, false)
  1134  		},
  1135  		Sweeper:                       s.sweeper,
  1136  		Registry:                      s.invoices,
  1137  		NotifyClosedChannel:           s.channelNotifier.NotifyClosedChannelEvent,
  1138  		NotifyFullyResolvedChannel:    s.channelNotifier.NotifyFullyResolvedChannelEvent,
  1139  		OnionProcessor:                s.sphinx,
  1140  		PaymentsExpirationGracePeriod: cfg.PaymentsExpirationGracePeriod,
  1141  		IsForwardedHTLC:               s.htlcSwitch.IsForwardedHTLC,
  1142  		Clock:                         clock.NewDefaultClock(),
  1143  		SubscribeBreachComplete:       s.breachArbiter.SubscribeBreachComplete,
  1144  	}, dbs.ChanStateDB)
  1145  
  1146  	// Select the configuration and funding parameters for Decred.
  1147  	chainCfg := cfg.Decred
  1148  	minRemoteDelay := funding.MinDcrRemoteDelay
  1149  	maxRemoteDelay := funding.MaxDcrRemoteDelay
  1150  
  1151  	var chanIDSeed [32]byte
  1152  	if _, err := rand.Read(chanIDSeed[:]); err != nil {
  1153  		return nil, err
  1154  	}
  1155  
  1156  	s.fundingMgr, err = funding.NewFundingManager(funding.Config{
  1157  		NoWumboChans:       !cfg.ProtocolOptions.Wumbo(),
  1158  		IDKey:              nodeKeyDesc.PubKey,
  1159  		IDKeyLoc:           nodeKeyDesc.KeyLocator,
  1160  		Wallet:             cc.Wallet,
  1161  		PublishTransaction: cc.Wallet.PublishTransaction,
  1162  		UpdateLabel: func(hash chainhash.Hash, label string) error {
  1163  			return cc.Wallet.LabelTransaction(hash, label, true)
  1164  		},
  1165  		Notifier:     cc.ChainNotifier,
  1166  		FeeEstimator: cc.FeeEstimator,
  1167  		SignMessage:  cc.MsgSigner.SignMessage,
  1168  		CurrentNodeAnnouncement: func() (lnwire.NodeAnnouncement, error) {
  1169  			return s.genNodeAnnouncement(true)
  1170  		},
  1171  		SendAnnouncement: s.authGossiper.ProcessLocalAnnouncement,
  1172  		NotifyWhenOnline: s.NotifyWhenOnline,
  1173  		TempChanIDSeed:   chanIDSeed,
  1174  		FindChannel: func(chanID lnwire.ChannelID) (
  1175  			*channeldb.OpenChannel, error) {
  1176  
  1177  			dbChannels, err := s.chanStateDB.FetchAllChannels()
  1178  			if err != nil {
  1179  				return nil, err
  1180  			}
  1181  
  1182  			for _, channel := range dbChannels {
  1183  				if chanID.IsChanPoint(&channel.FundingOutpoint) {
  1184  					return channel, nil
  1185  				}
  1186  			}
  1187  
  1188  			return nil, fmt.Errorf("unable to find channel")
  1189  		},
  1190  		DefaultRoutingPolicy: cc.RoutingPolicy,
  1191  		DefaultMinHtlcIn:     cc.MinHtlcIn,
  1192  		NumRequiredConfs: func(chanAmt dcrutil.Amount,
  1193  			pushAmt lnwire.MilliAtom) uint16 {
  1194  			// For large channels we increase the number of
  1195  			// confirmations we require for the channel to be
  1196  			// considered open. As it is always the responder that
  1197  			// gets to choose this value, the pushAmt is the value being
  1198  			// pushed to us. This means we have more to lose in the
  1199  			// case this gets re-orged out, and we will require
  1200  			// more confirmations before we consider it open (see the worked example after this config).
  1201  
  1202  			// In case the user has explicitly specified
  1203  			// a default value for the number of
  1204  			// confirmations, we use it.
  1205  			defaultConf := uint16(cfg.Decred.DefaultNumChanConfs)
  1206  			if defaultConf != 0 {
  1207  				return defaultConf
  1208  			}
  1209  
  1210  			minConf := uint64(3)
  1211  			maxConf := uint64(6)
  1212  
  1213  			// If this is a wumbo channel, then we'll require the
  1214  			// max amount of confirmations.
  1215  			if chanAmt > MaxFundingAmount {
  1216  				return uint16(maxConf)
  1217  			}
  1218  
  1219  			// If not we return a value scaled linearly
  1220  			// between 3 and 6, depending on channel size.
  1221  			// TODO(halseth): Use 1 as minimum?
  1222  			maxChannelSize := uint64(
  1223  				lnwire.NewMAtomsFromAtoms(MaxFundingAmount))
  1224  			stake := lnwire.NewMAtomsFromAtoms(chanAmt) + pushAmt
  1225  			conf := maxConf * uint64(stake) / maxChannelSize
  1226  			if conf < minConf {
  1227  				conf = minConf
  1228  			}
  1229  			if conf > maxConf {
  1230  				conf = maxConf
  1231  			}
  1232  			return uint16(conf)
  1233  		},
  1234  		RequiredRemoteDelay: func(chanAmt dcrutil.Amount) uint16 {
  1235  			// We scale the remote CSV delay (the time the
  1236  			// remote party has to claim funds in case of a unilateral
  1237  			// close) linearly from minRemoteDelay blocks
  1238  			// for small channels, to maxRemoteDelay blocks for
  1239  			// channels of size MaxFundingAmount.
  1240  
  1241  			// In case the user has explicitly specified
  1242  			// a default value for the remote delay, we
  1243  			// use it.
  1244  			defaultDelay := uint16(cfg.Decred.DefaultRemoteDelay)
  1245  			if defaultDelay > 0 {
  1246  				return defaultDelay
  1247  			}
  1248  
  1249  			// If this is a wumbo channel, then we'll require the
  1250  			// max value.
  1251  			if chanAmt > MaxFundingAmount {
  1252  				return maxRemoteDelay
  1253  			}
  1254  
  1255  			// If not we scale according to channel size.
  1256  			delay := uint16(dcrutil.Amount(maxRemoteDelay) *
  1257  				chanAmt / MaxFundingAmount)
  1258  			if delay < minRemoteDelay {
  1259  				delay = minRemoteDelay
  1260  			}
  1261  			if delay > maxRemoteDelay {
  1262  				delay = maxRemoteDelay
  1263  			}
  1264  			return delay
  1265  		},
  1266  		WatchNewChannel: func(channel *channeldb.OpenChannel,
  1267  			peerKey *secp256k1.PublicKey) error {
  1268  
  1269  			// First, we'll mark this new peer as a persistent peer
  1270  			// for re-connection purposes. If the peer is not yet
  1271  			// tracked or the user hasn't requested it to be perm,
  1272  			// we'll set false to prevent the server from continuing
  1273  			// to connect to this peer even if the number of
  1274  			// channels with this peer is zero.
  1275  			s.mu.Lock()
  1276  			pubStr := string(peerKey.SerializeCompressed())
  1277  			if _, ok := s.persistentPeers[pubStr]; !ok {
  1278  				s.persistentPeers[pubStr] = false
  1279  			}
  1280  			s.mu.Unlock()
  1281  
  1282  			// With that taken care of, we'll send this channel to
  1283  			// the chain arb so it can react to on-chain events.
  1284  			return s.chainArb.WatchNewChannel(channel)
  1285  		},
  1286  		ReportShortChanID: func(chanPoint wire.OutPoint) error {
  1287  			cid := lnwire.NewChanIDFromOutPoint(&chanPoint)
  1288  			return s.htlcSwitch.UpdateShortChanID(cid)
  1289  		},
  1290  		RequiredRemoteChanReserve: func(chanAmt,
  1291  			dustLimit dcrutil.Amount) dcrutil.Amount {
  1292  
  1293  			// By default, we'll require the remote peer to maintain
  1294  			// at least 1% of the total channel capacity at all
  1295  			// times. If this value ends up dipping below the dust
  1296  			// limit, then we'll use the dust limit itself as the
  1297  			// reserve as required by BOLT #2.
  1298  			reserve := chanAmt / 100
  1299  			if reserve < dustLimit {
  1300  				reserve = dustLimit
  1301  			}
  1302  
  1303  			return reserve
  1304  		},
  1305  		RequiredRemoteMaxValue: func(chanAmt dcrutil.Amount) lnwire.MilliAtom {
  1306  			// By default, we'll allow the remote peer to fully
  1307  			// utilize the full bandwidth of the channel, minus our
  1308  			// required reserve.
  1309  			reserve := lnwire.NewMAtomsFromAtoms(chanAmt / 100)
  1310  			return lnwire.NewMAtomsFromAtoms(chanAmt) - reserve
  1311  		},
  1312  		RequiredRemoteMaxHTLCs: func(chanAmt dcrutil.Amount) uint16 {
  1313  			if cfg.DefaultRemoteMaxHtlcs > 0 {
  1314  				return cfg.DefaultRemoteMaxHtlcs
  1315  			}
  1316  
  1317  			// By default, we'll permit them to utilize the full
  1318  			// channel bandwidth.
  1319  			return uint16(input.MaxHTLCNumber / 2)
  1320  		},
  1321  		ZombieSweeperInterval:         1 * time.Minute,
  1322  		ReservationTimeout:            10 * time.Minute,
  1323  		MinChanSize:                   dcrutil.Amount(cfg.MinChanSize),
  1324  		MaxChanSize:                   dcrutil.Amount(cfg.MaxChanSize),
  1325  		MaxPendingChannels:            cfg.MaxPendingChannels,
  1326  		RejectPush:                    cfg.RejectPush,
  1327  		MaxLocalCSVDelay:              chainCfg.MaxLocalDelay,
  1328  		NotifyOpenChannelEvent:        s.channelNotifier.NotifyOpenChannelEvent,
  1329  		OpenChannelPredicate:          chanPredicate,
  1330  		NotifyPendingOpenChannelEvent: s.channelNotifier.NotifyPendingOpenChannelEvent,
  1331  		EnableUpfrontShutdown:         cfg.EnableUpfrontShutdown,
  1332  		RegisteredChains:              cfg.registeredChains,
  1333  		MaxAnchorsCommitFeeRate: chainfee.AtomPerKByte(
  1334  			s.cfg.MaxCommitFeeRateAnchors * 1000),
  1335  	})
  1336  	if err != nil {
  1337  		return nil, err
  1338  	}
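
	// Worked example (added for illustration, not part of the original
	// source): the scaling rules configured above, applied to a
	// hypothetical channel funded at exactly half of MaxFundingAmount
	// with nothing pushed and no user overrides set in the config.
	//
	//   - NumRequiredConfs: conf = 6 * stake / maxChannelSize, and with
	//     the stake at half the maximum channel size that is 6 * 1/2 = 3,
	//     i.e. exactly the minimum of 3 confirmations. A wumbo-sized
	//     channel (chanAmt > MaxFundingAmount) gets the full 6.
	//
	//   - RequiredRemoteDelay: scaled linearly the same way, so roughly
	//     half of funding.MaxDcrRemoteDelay blocks, clamped up to
	//     funding.MinDcrRemoteDelay if that is larger.
	//
	//   - RequiredRemoteChanReserve: 1% of the capacity, bumped up to the
	//     dust limit if the 1% figure would fall below it, per BOLT #2.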
  1339  
  1340  	// Next, we'll assemble the sub-system that will maintain an on-disk
  1341  	// static backup of the latest channel state.
  1342  	chanNotifier := &channelNotifier{
  1343  		chanNotifier: s.channelNotifier,
  1344  		addrs:        dbs.ChanStateDB,
  1345  	}
  1346  	backupFile := chanbackup.NewMultiFile(cfg.BackupFilePath)
  1347  	startingChans, err := chanbackup.FetchStaticChanBackups(
  1348  		s.chanStateDB, s.addrSource,
  1349  	)
  1350  	if err != nil {
  1351  		return nil, err
  1352  	}
  1353  	s.chanSubSwapper, err = chanbackup.NewSubSwapper(
  1354  		startingChans, chanNotifier, s.cc.KeyRing, backupFile,
  1355  	)
  1356  	if err != nil {
  1357  		return nil, err
  1358  	}
  1359  
  1360  	// Assemble a peer notifier which will provide clients with subscriptions
  1361  	// to peer online and offline events.
  1362  	s.peerNotifier = peernotifier.New()
  1363  
  1364  	// Create a channel event store which monitors all open channels.
  1365  	s.chanEventStore = chanfitness.NewChannelEventStore(&chanfitness.Config{
  1366  		SubscribeChannelEvents: func() (subscribe.Subscription, error) {
  1367  			return s.channelNotifier.SubscribeChannelEvents()
  1368  		},
  1369  		SubscribePeerEvents: func() (subscribe.Subscription, error) {
  1370  			return s.peerNotifier.SubscribePeerEvents()
  1371  		},
  1372  		GetOpenChannels: s.chanStateDB.FetchAllOpenChannels,
  1373  		Clock:           clock.NewDefaultClock(),
  1374  		ReadFlapCount:   s.miscDB.ReadFlapCount,
  1375  		WriteFlapCount:  s.miscDB.WriteFlapCounts,
  1376  		FlapCountTicker: ticker.New(chanfitness.FlapCountFlushRate),
  1377  	})
  1378  
  1379  	if cfg.WtClient.Active {
  1380  		policy := wtpolicy.DefaultPolicy()
  1381  
  1382  		if cfg.WtClient.SweepFeeRate != 0 {
  1383  			// We expose the sweep fee rate in atom/byte, but the
  1384  			// tower protocol operates in atom/KB.
  1385  			sweepRateAtomPerKB := chainfee.AtomPerKByte(
  1386  				1000 * cfg.WtClient.SweepFeeRate,
  1387  			)
  1388  			policy.SweepFeeRate = sweepRateAtomPerKB
  1389  		}
  1390  
  1391  		if err := policy.Validate(); err != nil {
  1392  			return nil, err
  1393  		}
  1394  
  1395  		// authDial is a wrapper around brontide.Dial for the
  1396  		// watchtower.
  1397  		authDial := func(localKey keychain.SingleKeyECDH,
  1398  			netAddr *lnwire.NetAddress,
  1399  			dialer tor.DialFunc) (wtserver.Peer, error) {
  1400  
  1401  			return brontide.Dial(
  1402  				localKey, netAddr, cfg.ConnectionTimeout, dialer,
  1403  			)
  1404  		}
  1405  
  1406  		s.towerClient, err = wtclient.New(&wtclient.Config{
  1407  			ChainParams:    s.cfg.ActiveNetParams.Params,
  1408  			Signer:         cc.Wallet.Cfg.Signer,
  1409  			NewAddress:     newSweepPkScriptGen(cc.Wallet),
  1410  			SecretKeyRing:  s.cc.KeyRing,
  1411  			Dial:           cfg.net.Dial,
  1412  			AuthDial:       authDial,
  1413  			DB:             dbs.TowerClientDB,
  1414  			Policy:         policy,
  1415  			ChainHash:      s.cfg.ActiveNetParams.GenesisHash,
  1416  			MinBackoff:     10 * time.Second,
  1417  			MaxBackoff:     5 * time.Minute,
  1418  			ForceQuitDelay: wtclient.DefaultForceQuitDelay,
  1419  		})
  1420  		if err != nil {
  1421  			return nil, err
  1422  		}
  1423  
  1424  		// Copy the policy for legacy channels and set the blob flag
  1425  		// signalling support for anchor channels.
  1426  		anchorPolicy := policy
  1427  		anchorPolicy.TxPolicy.BlobType |=
  1428  			blob.Type(blob.FlagAnchorChannel)
  1429  
  1430  		s.anchorTowerClient, err = wtclient.New(&wtclient.Config{
  1431  			ChainParams:    s.cfg.ActiveNetParams.Params,
  1432  			Signer:         cc.Wallet.Cfg.Signer,
  1433  			NewAddress:     newSweepPkScriptGen(cc.Wallet),
  1434  			SecretKeyRing:  s.cc.KeyRing,
  1435  			Dial:           cfg.net.Dial,
  1436  			AuthDial:       authDial,
  1437  			DB:             dbs.TowerClientDB,
  1438  			Policy:         anchorPolicy,
  1439  			ChainHash:      s.cfg.ActiveNetParams.GenesisHash,
  1440  			MinBackoff:     10 * time.Second,
  1441  			MaxBackoff:     5 * time.Minute,
  1442  			ForceQuitDelay: wtclient.DefaultForceQuitDelay,
  1443  		})
  1444  		if err != nil {
  1445  			return nil, err
  1446  		}
  1447  	}
  1448  
  1449  	if len(cfg.ExternalHosts) != 0 {
  1450  		advertisedIPs := make(map[string]struct{})
  1451  		for _, addr := range s.currentNodeAnn.Addresses {
  1452  			advertisedIPs[addr.String()] = struct{}{}
  1453  		}
  1454  
  1455  		s.hostAnn = netann.NewHostAnnouncer(netann.HostAnnouncerConfig{
  1456  			Hosts:         cfg.ExternalHosts,
  1457  			RefreshTicker: ticker.New(defaultHostSampleInterval),
  1458  			LookupHost: func(host string) (net.Addr, error) {
  1459  				return lncfg.ParseAddressString(
  1460  					host, strconv.Itoa(defaultPeerPort),
  1461  					cfg.net.ResolveTCPAddr,
  1462  				)
  1463  			},
  1464  			AdvertisedIPs:  advertisedIPs,
  1465  			AnnounceNewIPs: netann.IPAnnouncer(s.genNodeAnnouncement),
  1466  		})
  1467  	}
  1468  
  1469  	// Create liveliness monitor.
  1470  	s.createLivenessMonitor(cfg, cc)
  1471  
  1472  	// Create the connection manager which will be responsible for
  1473  	// maintaining persistent outbound connections and also accepting new
  1474  	// incoming connections.
  1475  	cmgr, err := connmgr.New(&connmgr.Config{
  1476  		Listeners:      listeners,
  1477  		OnAccept:       s.InboundPeerConnected,
  1478  		RetryDuration:  time.Second * 5,
  1479  		TargetOutbound: 100,
  1480  		DialAddr:       noiseDial(nodeKeyECDH, s.cfg.net, s.cfg.ConnectionTimeout),
  1481  		OnConnection:   s.OutboundPeerConnected,
  1482  	})
  1483  	if err != nil {
  1484  		return nil, err
  1485  	}
  1486  	s.connMgr = cmgr
  1487  
  1488  	return s, nil
  1489  }
  1490  
  1491  // createLivenessMonitor creates a set of health checks using our configured
  1492  // values and uses these checks to create a liveliness monitor. Available
  1493  // health checks:
  1494  //   - chainHealthCheck
  1495  //   - diskCheck
  1496  //   - tlsHealthCheck
  1497  //   - torController, only created when tor is enabled.
  1498  //
  1499  // If a health check has been disabled by setting attempts to 0, our monitor
  1500  // will not run it.
  1501  func (s *server) createLivenessMonitor(cfg *Config, cc *chainreg.ChainControl) {
  1502  	chainHealthCheck := healthcheck.NewObservation(
  1503  		"chain backend",
  1504  		cc.HealthCheck,
  1505  		cfg.HealthChecks.ChainCheck.Interval,
  1506  		cfg.HealthChecks.ChainCheck.Timeout,
  1507  		cfg.HealthChecks.ChainCheck.Backoff,
  1508  		cfg.HealthChecks.ChainCheck.Attempts,
  1509  	)
  1510  
  1511  	diskCheck := healthcheck.NewObservation(
  1512  		"disk space",
  1513  		func() error {
  1514  			free, err := healthcheck.AvailableDiskSpaceRatio(
  1515  				cfg.LndDir,
  1516  			)
  1517  			if err != nil {
  1518  				return err
  1519  			}
  1520  
  1521  			// If we have more free space than we require,
  1522  			// we return a nil error.
  1523  			if free > cfg.HealthChecks.DiskCheck.RequiredRemaining {
  1524  				return nil
  1525  			}
  1526  
  1527  			return fmt.Errorf("require: %v free space, got: %v",
  1528  				cfg.HealthChecks.DiskCheck.RequiredRemaining,
  1529  				free)
  1530  		},
  1531  		cfg.HealthChecks.DiskCheck.Interval,
  1532  		cfg.HealthChecks.DiskCheck.Timeout,
  1533  		cfg.HealthChecks.DiskCheck.Backoff,
  1534  		cfg.HealthChecks.DiskCheck.Attempts,
  1535  	)
  1536  
  1537  	tlsHealthCheck := healthcheck.NewObservation(
  1538  		"tls",
  1539  		func() error {
  1540  			_, parsedCert, err := cert.LoadCert(
  1541  				cfg.TLSCertPath, cfg.TLSKeyPath,
  1542  			)
  1543  			if err != nil {
  1544  				return err
  1545  			}
  1546  
  1547  			// If the current time is past the certificate's
  1548  			// expiry time, then it is considered expired.
  1549  			if time.Now().After(parsedCert.NotAfter) {
  1550  				return fmt.Errorf("TLS certificate is "+
  1551  					"expired as of %v", parsedCert.NotAfter)
  1552  			}
  1553  
  1554  			// If the certificate is not outdated, no error needs
  1555  			// to be returned.
  1556  			return nil
  1557  		},
  1558  		cfg.HealthChecks.TLSCheck.Interval,
  1559  		cfg.HealthChecks.TLSCheck.Timeout,
  1560  		cfg.HealthChecks.TLSCheck.Backoff,
  1561  		cfg.HealthChecks.TLSCheck.Attempts,
  1562  	)
  1563  
  1564  	checks := []*healthcheck.Observation{
  1565  		chainHealthCheck, diskCheck, tlsHealthCheck,
  1566  	}
  1567  
  1568  	// If Tor is enabled, add the healthcheck for tor connection.
  1569  	if s.torController != nil {
  1570  		torConnectionCheck := healthcheck.NewObservation(
  1571  			"tor connection",
  1572  			func() error {
  1573  				return healthcheck.CheckTorServiceStatus(
  1574  					s.torController,
  1575  					s.createNewHiddenService,
  1576  				)
  1577  			},
  1578  			cfg.HealthChecks.TorConnection.Interval,
  1579  			cfg.HealthChecks.TorConnection.Timeout,
  1580  			cfg.HealthChecks.TorConnection.Backoff,
  1581  			cfg.HealthChecks.TorConnection.Attempts,
  1582  		)
  1583  		checks = append(checks, torConnectionCheck)
  1584  	}
  1585  
  1586  	// If we have not disabled all of our health checks, we create a
  1587  	// liveliness monitor with our configured checks.
  1588  	s.livelinessMonitor = healthcheck.NewMonitor(
  1589  		&healthcheck.Config{
  1590  			Checks:   checks,
  1591  			Shutdown: srvrLog.Criticalf,
  1592  		},
  1593  	)
  1594  }
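        // Illustrative sketch (not part of the original source): an extra
        // observation could be wired into the monitor above using the same
        // healthcheck.NewObservation pattern. The "custom backend" name and
        // probe function below are hypothetical.
        //
        //	customCheck := healthcheck.NewObservation(
        //		"custom backend",
        //		func() error {
        //			// Return nil while the probed resource is
        //			// healthy; returning an error triggers the
        //			// monitor's backoff and, after the configured
        //			// attempts, its Shutdown callback.
        //			return nil
        //		},
        //		time.Minute,    // interval between checks
        //		10*time.Second, // timeout for a single check
        //		time.Minute,    // backoff between failed attempts
        //		3,              // attempts before shutdown (0 disables)
        //	)
        //	checks = append(checks, customCheck)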
  1595  
  1596  // Started returns true if the server has been started, and false otherwise.
  1597  // NOTE: This function is safe for concurrent access.
  1598  func (s *server) Started() bool {
  1599  	return atomic.LoadInt32(&s.active) != 0
  1600  }
  1601  
  1602  // cleaner is used to aggregate "cleanup" functions during an operation that
  1603  // starts several subsystems. In case one of the subsystems fails to start
  1604  // and a proper resource cleanup is required, the "run" method achieves this
  1605  // by running all of the added "cleanup" functions in reverse order.
  1606  type cleaner []func() error
  1607  
  1608  // add is used to add a cleanup function to be called when
  1609  // the run function is executed.
  1610  func (c cleaner) add(cleanup func() error) cleaner {
  1611  	return append(c, cleanup)
  1612  }
  1613  
  1614  // run is used to run all the previously added cleanup functions.
  1615  func (c cleaner) run() {
  1616  	for i := len(c) - 1; i >= 0; i-- {
  1617  		if err := c[i](); err != nil {
  1618  			srvrLog.Infof("Cleanup failed: %v", err)
  1619  		}
  1620  	}
  1621  }
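        // Illustrative sketch (not part of the original source): cleaner is
        // meant for the start-or-roll-back pattern that Start below follows.
        // Each successfully started subsystem registers its Stop function,
        // and run() invokes them in reverse order when a later subsystem
        // fails to start. subA and subB are hypothetical subsystems.
        //
        //	c := cleaner{}
        //	if err := subA.Start(); err != nil {
        //		return err
        //	}
        //	c = c.add(subA.Stop)
        //
        //	if err := subB.Start(); err != nil {
        //		c.run() // stops subA before bailing out
        //		return err
        //	}
        //	c = c.add(subB.Stop)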
  1622  
  1623  // Start starts the main daemon server, all requested listeners, and any helper
  1624  // goroutines.
  1625  // NOTE: This function is safe for concurrent access.
  1626  func (s *server) Start() error {
  1627  	var startErr error
  1628  
  1629  	// If one subsystem fails to start, the following code ensures that the
  1630  	// previously started ones are stopped. It also ensures a proper wallet
  1631  	// shutdown, which is important for releasing its resources (boltdb, etc.).
  1632  	cleanup := cleaner{}
  1633  
  1634  	s.start.Do(func() {
  1635  		if err := s.customMessageServer.Start(); err != nil {
  1636  			startErr = err
  1637  			return
  1638  		}
  1639  		cleanup = cleanup.add(s.customMessageServer.Stop)
  1640  
  1641  		if s.hostAnn != nil {
  1642  			if err := s.hostAnn.Start(); err != nil {
  1643  				startErr = err
  1644  				return
  1645  			}
  1646  			cleanup = cleanup.add(s.hostAnn.Stop)
  1647  		}
  1648  
  1649  		if s.livelinessMonitor != nil {
  1650  			if err := s.livelinessMonitor.Start(); err != nil {
  1651  				startErr = err
  1652  				return
  1653  			}
  1654  			cleanup = cleanup.add(s.livelinessMonitor.Stop)
  1655  		}
  1656  
  1657  		// Start the notification server. This is used so channel
  1658  		// management goroutines can be notified when a funding
  1659  		// transaction reaches a sufficient number of confirmations, or
  1660  		// when the input for the funding transaction is spent in an
  1661  		// attempt at an uncooperative close by the counterparty.
  1662  		if err := s.sigPool.Start(); err != nil {
  1663  			startErr = err
  1664  			return
  1665  		}
  1666  		cleanup = cleanup.add(s.sigPool.Stop)
  1667  
  1668  		if err := s.writePool.Start(); err != nil {
  1669  			startErr = err
  1670  			return
  1671  		}
  1672  		cleanup = cleanup.add(s.writePool.Stop)
  1673  
  1674  		if err := s.readPool.Start(); err != nil {
  1675  			startErr = err
  1676  			return
  1677  		}
  1678  		cleanup = cleanup.add(s.readPool.Stop)
  1679  
  1680  		if err := s.cc.ChainNotifier.Start(); err != nil {
  1681  			startErr = err
  1682  			return
  1683  		}
  1684  		cleanup = cleanup.add(s.cc.ChainNotifier.Stop)
  1685  
  1686  		if err := s.channelNotifier.Start(); err != nil {
  1687  			startErr = err
  1688  			return
  1689  		}
  1690  		cleanup = cleanup.add(s.channelNotifier.Stop)
  1691  
  1692  		if err := s.peerNotifier.Start(); err != nil {
  1693  			startErr = err
  1694  			return
  1695  		}
  1696  		cleanup = cleanup.add(func() error {
  1697  			return s.peerNotifier.Stop()
  1698  		})
  1699  		if err := s.htlcNotifier.Start(); err != nil {
  1700  			startErr = err
  1701  			return
  1702  		}
  1703  		cleanup = cleanup.add(s.htlcNotifier.Stop)
  1704  
  1705  		if s.towerClient != nil {
  1706  			if err := s.towerClient.Start(); err != nil {
  1707  				startErr = err
  1708  				return
  1709  			}
  1710  			cleanup = cleanup.add(s.towerClient.Stop)
  1711  		}
  1712  		if s.anchorTowerClient != nil {
  1713  			if err := s.anchorTowerClient.Start(); err != nil {
  1714  				startErr = err
  1715  				return
  1716  			}
  1717  			cleanup = cleanup.add(s.anchorTowerClient.Stop)
  1718  		}
  1719  
  1720  		if err := s.sweeper.Start(); err != nil {
  1721  			startErr = err
  1722  			return
  1723  		}
  1724  		cleanup = cleanup.add(s.sweeper.Stop)
  1725  
  1726  		if err := s.utxoNursery.Start(); err != nil {
  1727  			startErr = err
  1728  			return
  1729  		}
  1730  		cleanup = cleanup.add(s.utxoNursery.Stop)
  1731  
  1732  		if err := s.breachArbiter.Start(); err != nil {
  1733  			startErr = err
  1734  			return
  1735  		}
  1736  		cleanup = cleanup.add(s.breachArbiter.Stop)
  1737  
  1738  		if err := s.fundingMgr.Start(); err != nil {
  1739  			startErr = err
  1740  			return
  1741  		}
  1742  		cleanup = cleanup.add(s.fundingMgr.Stop)
  1743  
  1744  		if err := s.chainArb.Start(); err != nil {
  1745  			startErr = err
  1746  			return
  1747  		}
  1748  		cleanup = cleanup.add(s.chainArb.Stop)
  1749  
  1750  		if err := s.authGossiper.Start(); err != nil {
  1751  			startErr = err
  1752  			return
  1753  		}
  1754  		cleanup = cleanup.add(s.authGossiper.Stop)
  1755  
  1756  		if err := s.chanRouter.Start(); err != nil {
  1757  			startErr = err
  1758  			return
  1759  		}
  1760  		cleanup = cleanup.add(s.chanRouter.Stop)
  1761  
  1762  		if err := s.invoices.Start(); err != nil {
  1763  			startErr = err
  1764  			return
  1765  		}
  1766  		cleanup = cleanup.add(s.invoices.Stop)
  1767  
  1768  		if err := s.sphinx.Start(); err != nil {
  1769  			startErr = err
  1770  			return
  1771  		}
  1772  		cleanup = cleanup.add(s.sphinx.Stop)
  1773  
  1774  		if err := s.htlcSwitch.Start(); err != nil {
  1775  			startErr = err
  1776  			return
  1777  		}
  1778  		cleanup = cleanup.add(s.htlcSwitch.Stop)
  1779  
  1780  		if err := s.chanStatusMgr.Start(); err != nil {
  1781  			startErr = err
  1782  			return
  1783  		}
  1784  		cleanup = cleanup.add(s.chanStatusMgr.Stop)
  1785  
  1786  		if err := s.chanEventStore.Start(); err != nil {
  1787  			startErr = err
  1788  			return
  1789  		}
  1790  		cleanup = cleanup.add(func() error {
  1791  			s.chanEventStore.Stop()
  1792  			return nil
  1793  		})
  1794  
  1795  		s.missionControl.RunStoreTicker()
  1796  		cleanup = cleanup.add(func() error {
  1797  			s.missionControl.StopStoreTicker()
  1798  			return nil
  1799  		})
  1800  
  1801  		// Before we start the connMgr, we'll check to see if we have
  1802  		// any backups to recover. We do this now as we want to ensure
  1803  		// that we have all the information we need to handle channel
  1804  		// recovery _before_ we even accept connections from any peers.
  1805  		chanRestorer := &chanDBRestorer{
  1806  			db:         s.chanStateDB,
  1807  			secretKeys: s.cc.KeyRing,
  1808  			chainArb:   s.chainArb,
  1809  		}
  1810  		if len(s.chansToRestore.PackedSingleChanBackups) != 0 {
  1811  			err := chanbackup.UnpackAndRecoverSingles(
  1812  				s.chansToRestore.PackedSingleChanBackups,
  1813  				s.cc.KeyRing, chanRestorer, s,
  1814  			)
  1815  			if err != nil {
  1816  				startErr = fmt.Errorf("unable to unpack single "+
  1817  					"backups: %v", err)
  1818  				return
  1819  			}
  1820  		}
  1821  		if len(s.chansToRestore.PackedMultiChanBackup) != 0 {
  1822  			err := chanbackup.UnpackAndRecoverMulti(
  1823  				s.chansToRestore.PackedMultiChanBackup,
  1824  				s.cc.KeyRing, chanRestorer, s,
  1825  			)
  1826  			if err != nil {
  1827  				startErr = fmt.Errorf("unable to unpack chan "+
  1828  					"backup: %v", err)
  1829  				return
  1830  			}
  1831  		}
  1832  
  1833  		if err := s.chanSubSwapper.Start(); err != nil {
  1834  			startErr = err
  1835  			return
  1836  		}
  1837  		cleanup = cleanup.add(s.chanSubSwapper.Stop)
  1838  
  1839  		if s.torController != nil {
  1840  			if err := s.createNewHiddenService(); err != nil {
  1841  				startErr = err
  1842  				return
  1843  			}
  1844  			cleanup = cleanup.add(s.torController.Stop)
  1845  		}
  1846  
  1847  		if s.natTraversal != nil {
  1848  			s.wg.Add(1)
  1849  			go s.watchExternalIP()
  1850  		}
  1851  
  1852  		// Subscribe to NodeAnnouncements that advertise new addresses
  1853  		// for our persistent peers.
  1854  		if err := s.updatePersistentPeerAddrs(); err != nil {
  1855  			startErr = err
  1856  			return
  1857  		}
  1858  
  1859  		// With all the relevant sub-systems started, we'll now attempt
  1860  		// to establish persistent connections to our direct channel
  1861  		// collaborators within the network. Before doing so however,
  1862  		// we'll prune our set of link nodes found within the database
  1863  		// to ensure we don't reconnect to any nodes we no longer have
  1864  		// open channels with.
  1865  		if err := s.chanStateDB.PruneLinkNodes(); err != nil {
  1866  			startErr = err
  1867  			return
  1868  		}
  1869  
  1870  		// Start connmgr last to prevent connections before init.
  1871  		s.connMgr.Start()
  1872  		cleanup = cleanup.add(func() error {
  1873  			s.connMgr.Stop()
  1874  			return nil
  1875  		})
  1876  
  1877  		// Give enough time for any outstanding, brontide-accepted
  1878  		// connections to percolate through the connMgr and be
  1879  		// registered as potential peers before attempting outbound
  1880  		// connections.
  1881  		time.Sleep(time.Millisecond * 200)
  1882  
  1883  		if err := s.establishPersistentConnections(); err != nil {
  1884  			startErr = err
  1885  			return
  1886  		}
  1887  
  1888  		// setSeedList is a helper function that turns multiple DNS seed
  1889  		// server tuples from the command line or config file into the
  1890  		// data structure we need and does a basic formal sanity check
  1891  		// in the process.
  1892  		setSeedList := func(tuples []string, genesisHash chainhash.Hash) {
  1893  			if len(tuples) == 0 {
  1894  				return
  1895  			}
  1896  
  1897  			result := make([][2]string, len(tuples))
  1898  			for idx, tuple := range tuples {
  1899  				tuple = strings.TrimSpace(tuple)
  1900  				if len(tuple) == 0 {
  1901  					return
  1902  				}
  1903  
  1904  				servers := strings.Split(tuple, ",")
  1905  				if len(servers) > 2 || len(servers) == 0 {
  1906  					srvrLog.Warnf("Ignoring invalid DNS "+
  1907  						"seed tuple: %v", servers)
  1908  					return
  1909  				}
  1910  
  1911  				copy(result[idx][:], servers)
  1912  			}
  1913  
  1914  			chainreg.ChainDNSSeeds[genesisHash] = result
  1915  		}
  1916  
  1917  		// Let users overwrite the DNS seed nodes. We only allow them
  1918  		// for Decred mainnet/testnet; all other combinations will just
  1919  		// be ignored.
  1920  		if !s.cfg.Decred.TestNet3 && !s.cfg.Decred.SimNet && !s.cfg.Decred.RegTest {
  1921  			setSeedList(
  1922  				s.cfg.Decred.DNSSeeds,
  1923  				chainreg.DecredMainNetParams.GenesisHash,
  1924  			)
  1925  		}
  1926  		if s.cfg.Decred.TestNet3 {
  1927  			setSeedList(
  1928  				s.cfg.Decred.DNSSeeds,
  1929  				chainreg.DecredTestNetParams.GenesisHash,
  1930  			)
  1931  		}
  1932  
  1933  		// If network bootstrapping hasn't been disabled, then we'll
  1934  		// configure the set of active bootstrappers, and launch a
  1935  		// dedicated goroutine to maintain a set of persistent
  1936  		// connections.
  1937  		if shouldPeerBootstrap(s.cfg) {
  1938  			bootstrappers, err := initNetworkBootstrappers(s)
  1939  			if err != nil {
  1940  				startErr = err
  1941  				return
  1942  			}
  1943  
  1944  			s.wg.Add(1)
  1945  			go s.peerBootstrapper(defaultMinPeers, bootstrappers)
  1946  		} else {
  1947  			srvrLog.Infof("Auto peer bootstrapping is disabled")
  1948  		}
  1949  
  1950  		// Set the active flag now that we've completed the full
  1951  		// startup.
  1952  		atomic.StoreInt32(&s.active, 1)
  1953  	})
  1954  
  1955  	if startErr != nil {
  1956  		cleanup.run()
  1957  	}
  1958  	return startErr
  1959  }
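        // Illustrative note (not part of the original source): the DNS seed
        // tuples consumed by setSeedList in Start above are comma-separated
        // entries holding at most two hosts, with the second host optional.
        // Hypothetical values would therefore look like:
        //
        //	"seed.example.org"                 // single seed host
        //	"seed.example.org,soa.example.org" // seed plus optional second host
        //
        // Tuples with more than two hosts are rejected by the sanity check in
        // setSeedList and logged as invalid.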
  1960  
  1961  // Stop gracefully shuts down the main daemon server. This function will signal
  1962  // any active goroutines, or helper objects to exit, then blocks until they've
  1963  // all successfully exited. Additionally, any/all listeners are closed.
  1964  // NOTE: This function is safe for concurrent access.
  1965  func (s *server) Stop() error {
  1966  	s.stop.Do(func() {
  1967  		atomic.StoreInt32(&s.stopping, 1)
  1968  
  1969  		close(s.quit)
  1970  
  1971  		// Shutdown connMgr first to prevent conns during shutdown.
  1972  		s.connMgr.Stop()
  1973  
  1974  		// Shutdown the wallet, funding manager, and the rpc server.
  1975  		s.chanStatusMgr.Stop()
  1976  		if err := s.htlcSwitch.Stop(); err != nil {
  1977  			srvrLog.Warnf("failed to stop htlcSwitch: %v", err)
  1978  		}
  1979  		if err := s.sphinx.Stop(); err != nil {
  1980  			srvrLog.Warnf("failed to stop sphinx: %v", err)
  1981  		}
  1982  		if err := s.invoices.Stop(); err != nil {
  1983  			srvrLog.Warnf("failed to stop invoices: %v", err)
  1984  		}
  1985  		if err := s.chanRouter.Stop(); err != nil {
  1986  			srvrLog.Warnf("failed to stop chanRouter: %v", err)
  1987  		}
  1988  		if err := s.chainArb.Stop(); err != nil {
  1989  			srvrLog.Warnf("failed to stop chainArb: %v", err)
  1990  		}
  1991  		if err := s.fundingMgr.Stop(); err != nil {
  1992  			srvrLog.Warnf("failed to stop fundingMgr: %v", err)
  1993  		}
  1994  		if err := s.breachArbiter.Stop(); err != nil {
  1995  			srvrLog.Warnf("failed to stop contractcourt.BreachArbiter: %v", err)
  1996  		}
  1997  		if err := s.utxoNursery.Stop(); err != nil {
  1998  			srvrLog.Warnf("failed to stop contractcourt.UtxoNursery: %v", err)
  1999  		}
  2000  		if err := s.authGossiper.Stop(); err != nil {
  2001  			srvrLog.Warnf("failed to stop authGossiper: %v", err)
  2002  		}
  2003  		if err := s.sweeper.Stop(); err != nil {
  2004  			srvrLog.Warnf("failed to stop sweeper: %v", err)
  2005  		}
  2006  		if err := s.channelNotifier.Stop(); err != nil {
  2007  			srvrLog.Warnf("failed to stop channelNotifier: %v", err)
  2008  		}
  2009  		if err := s.peerNotifier.Stop(); err != nil {
  2010  			srvrLog.Warnf("failed to stop peerNotifier: %v", err)
  2011  		}
  2012  		if err := s.htlcNotifier.Stop(); err != nil {
  2013  			srvrLog.Warnf("failed to stop htlcNotifier: %v", err)
  2014  		}
  2015  		if err := s.chanSubSwapper.Stop(); err != nil {
  2016  			srvrLog.Warnf("failed to stop chanSubSwapper: %v", err)
  2017  		}
  2018  		if err := s.cc.ChainNotifier.Stop(); err != nil {
  2019  			srvrLog.Warnf("Unable to stop ChainNotifier: %v", err)
  2020  		}
  2021  		s.chanEventStore.Stop()
  2022  		s.missionControl.StopStoreTicker()
  2023  
  2024  		// Disconnect from each active peer to ensure that
  2025  		// peerTerminationWatchers signal completion to each peer.
  2026  		for _, peer := range s.Peers() {
  2027  			err := s.DisconnectPeer(peer.IdentityKey())
  2028  			if err != nil {
  2029  				srvrLog.Warnf("could not disconnect peer: %v, "+
  2030  					"received error: %v", peer.IdentityKey(),
  2031  					err,
  2032  				)
  2033  			}
  2034  		}
  2035  
  2036  		// Now that all connections have been torn down, stop the tower
  2037  		// client which will reliably flush all queued states to the
  2038  		// tower. If this is halted for any reason, the force quit timer
  2039  		// will kick in and abort to allow this method to return.
  2040  		if s.towerClient != nil {
  2041  			if err := s.towerClient.Stop(); err != nil {
  2042  				srvrLog.Warnf("Unable to shut down tower "+
  2043  					"client: %v", err)
  2044  			}
  2045  		}
  2046  		if s.anchorTowerClient != nil {
  2047  			if err := s.anchorTowerClient.Stop(); err != nil {
  2048  				srvrLog.Warnf("Unable to shut down anchor "+
  2049  					"tower client: %v", err)
  2050  			}
  2051  		}
  2052  
  2053  		if s.hostAnn != nil {
  2054  			if err := s.hostAnn.Stop(); err != nil {
  2055  				srvrLog.Warnf("unable to shut down host "+
  2056  					"announcer: %v", err)
  2057  			}
  2058  		}
  2059  
  2060  		if s.livelinessMonitor != nil {
  2061  			if err := s.livelinessMonitor.Stop(); err != nil {
  2062  				srvrLog.Warnf("unable to shutdown liveliness "+
  2063  					"monitor: %v", err)
  2064  			}
  2065  		}
  2066  
  2067  		// Wait for all lingering goroutines to quit.
  2068  		s.wg.Wait()
  2069  
  2070  		s.sigPool.Stop()
  2071  		s.writePool.Stop()
  2072  		s.readPool.Stop()
  2073  	})
  2074  
  2075  	return nil
  2076  }
  2077  
  2078  // Stopped returns true if the server has been instructed to shutdown.
  2079  // NOTE: This function is safe for concurrent access.
  2080  func (s *server) Stopped() bool {
  2081  	return atomic.LoadInt32(&s.stopping) != 0
  2082  }
  2083  
  2084  // configurePortForwarding attempts to set up port forwarding for the different
  2085  // ports that the server will be listening on.
  2086  //
  2087  // NOTE: This should only be used when using some kind of NAT traversal to
  2088  // automatically set up forwarding rules.
  2089  func (s *server) configurePortForwarding(ports ...uint16) ([]string, error) {
  2090  	ip, err := s.natTraversal.ExternalIP()
  2091  	if err != nil {
  2092  		return nil, err
  2093  	}
  2094  	s.lastDetectedIP = ip
  2095  
  2096  	externalIPs := make([]string, 0, len(ports))
  2097  	for _, port := range ports {
  2098  		if err := s.natTraversal.AddPortMapping(port); err != nil {
  2099  			srvrLog.Debugf("Unable to forward port %d: %v", port, err)
  2100  			continue
  2101  		}
  2102  
  2103  		hostIP := fmt.Sprintf("%v:%d", ip, port)
  2104  		externalIPs = append(externalIPs, hostIP)
  2105  	}
  2106  
  2107  	return externalIPs, nil
  2108  }
  2109  
  2110  // removePortForwarding attempts to clear the forwarding rules for the different
  2111  // ports the server is currently listening on.
  2112  //
  2113  // NOTE: This should only be used when using some kind of NAT traversal to
  2114  // automatically set up forwarding rules.
  2115  func (s *server) removePortForwarding() {
  2116  	forwardedPorts := s.natTraversal.ForwardedPorts()
  2117  	for _, port := range forwardedPorts {
  2118  		if err := s.natTraversal.DeletePortMapping(port); err != nil {
  2119  			srvrLog.Errorf("Unable to remove forwarding rules for "+
  2120  				"port %d: %v", port, err)
  2121  		}
  2122  	}
  2123  }
  2124  
  2125  // watchExternalIP continuously checks for an updated external IP address every
  2126  // 15 minutes. Once a new IP address has been detected, it will automatically
  2127  // handle port forwarding rules and send updated node announcements to the
  2128  // currently connected peers.
  2129  //
  2130  // NOTE: This MUST be run as a goroutine.
  2131  func (s *server) watchExternalIP() {
  2132  	defer s.wg.Done()
  2133  
  2134  	// Before exiting, we'll make sure to remove the forwarding rules set
  2135  	// up by the server.
  2136  	defer s.removePortForwarding()
  2137  
  2138  	// Keep track of the external IPs set by the user to avoid replacing
  2139  	// them when detecting a new IP.
  2140  	ipsSetByUser := make(map[string]struct{})
  2141  	for _, ip := range s.cfg.ExternalIPs {
  2142  		ipsSetByUser[ip.String()] = struct{}{}
  2143  	}
  2144  
  2145  	forwardedPorts := s.natTraversal.ForwardedPorts()
  2146  
  2147  	ticker := time.NewTicker(15 * time.Minute)
  2148  	defer ticker.Stop()
  2149  out:
  2150  	for {
  2151  		select {
  2152  		case <-ticker.C:
  2153  			// We'll start off by making sure a new IP address has
  2154  			// been detected.
  2155  			ip, err := s.natTraversal.ExternalIP()
  2156  			if err != nil {
  2157  				srvrLog.Debugf("Unable to retrieve the "+
  2158  					"external IP address: %v", err)
  2159  				continue
  2160  			}
  2161  
  2162  			// Periodically renew the NAT port forwarding.
  2163  			for _, port := range forwardedPorts {
  2164  				err := s.natTraversal.AddPortMapping(port)
  2165  				if err != nil {
  2166  					srvrLog.Warnf("Unable to automatically "+
  2167  						"re-create port forwarding using %s: %v",
  2168  						s.natTraversal.Name(), err)
  2169  				} else {
  2170  					srvrLog.Debugf("Automatically re-created "+
  2171  						"forwarding for port %d using %s to "+
  2172  						"advertise external IP",
  2173  						port, s.natTraversal.Name())
  2174  				}
  2175  			}
  2176  
  2177  			if ip.Equal(s.lastDetectedIP) {
  2178  				continue
  2179  			}
  2180  
  2181  			srvrLog.Infof("Detected new external IP address %s", ip)
  2182  
  2183  			// Next, we'll craft the new addresses that will be
  2184  			// included in the new node announcement and advertised
  2185  			// to the network. Each address will consist of the new
  2186  			// IP detected and one of the currently advertised
  2187  			// ports.
  2188  			var newAddrs []net.Addr
  2189  			for _, port := range forwardedPorts {
  2190  				hostIP := fmt.Sprintf("%v:%d", ip, port)
  2191  				addr, err := net.ResolveTCPAddr("tcp", hostIP)
  2192  				if err != nil {
  2193  					srvrLog.Debugf("Unable to resolve "+
  2194  						"host %v: %v", addr, err)
  2195  					continue
  2196  				}
  2197  
  2198  				newAddrs = append(newAddrs, addr)
  2199  			}
  2200  
  2201  			// Skip the update if we weren't able to resolve any of
  2202  			// the new addresses.
  2203  			if len(newAddrs) == 0 {
  2204  				srvrLog.Debug("Skipping node announcement " +
  2205  					"update due to not being able to " +
  2206  					"resolve any new addresses")
  2207  				continue
  2208  			}
  2209  
  2210  			// Now, we'll need to update the addresses in our node's
  2211  			// announcement in order to propagate the update
  2212  			// throughout the network. We'll only include addresses
  2213  			// that have a different IP from the previous one, as
  2214  			// the previous IP is no longer valid.
  2215  			currentNodeAnn, err := s.genNodeAnnouncement(false)
  2216  			if err != nil {
  2217  				srvrLog.Debugf("Unable to retrieve current "+
  2218  					"node announcement: %v", err)
  2219  				continue
  2220  			}
  2221  			for _, addr := range currentNodeAnn.Addresses {
  2222  				host, _, err := net.SplitHostPort(addr.String())
  2223  				if err != nil {
  2224  					srvrLog.Debugf("Unable to determine "+
  2225  						"host from address %v: %v",
  2226  						addr, err)
  2227  					continue
  2228  				}
  2229  
  2230  				// We'll also make sure to include external IPs
  2231  				// set manually by the user.
  2232  				_, setByUser := ipsSetByUser[addr.String()]
  2233  				if setByUser || host != s.lastDetectedIP.String() {
  2234  					newAddrs = append(newAddrs, addr)
  2235  				}
  2236  			}
  2237  
  2238  			// Then, we'll generate a new timestamped node
  2239  			// announcement with the updated addresses and broadcast
  2240  			// it to our peers.
  2241  			newNodeAnn, err := s.genNodeAnnouncement(
  2242  				true, netann.NodeAnnSetAddrs(newAddrs),
  2243  			)
  2244  			if err != nil {
  2245  				srvrLog.Debugf("Unable to generate new node "+
  2246  					"announcement: %v", err)
  2247  				continue
  2248  			}
  2249  
  2250  			err = s.BroadcastMessage(nil, &newNodeAnn)
  2251  			if err != nil {
  2252  				srvrLog.Debugf("Unable to broadcast new node "+
  2253  					"announcement to peers: %v", err)
  2254  				continue
  2255  			}
  2256  
  2257  			// Finally, update the last IP seen to the current one.
  2258  			s.lastDetectedIP = ip
  2259  		case <-s.quit:
  2260  			break out
  2261  		}
  2262  	}
  2263  }
  2264  
  2265  // initNetworkBootstrappers initializes a set of network peer bootstrappers
  2266  // based on the server, and currently active bootstrap mechanisms as defined
  2267  // within the current configuration.
  2268  func initNetworkBootstrappers(s *server) ([]discovery.NetworkPeerBootstrapper, error) {
  2269  	srvrLog.Infof("Initializing peer network bootstrappers!")
  2270  
  2271  	var bootStrappers []discovery.NetworkPeerBootstrapper
  2272  
  2273  	// First, we'll create an instance of the ChannelGraphBootstrapper as
  2274  	// this can be used by default if we've already partially seeded the
  2275  	// network.
  2276  	chanGraph := autopilot.ChannelGraphFromDatabase(s.graphDB)
  2277  	graphBootstrapper, err := discovery.NewGraphBootstrapper(chanGraph)
  2278  	if err != nil {
  2279  		return nil, err
  2280  	}
  2281  	bootStrappers = append(bootStrappers, graphBootstrapper)
  2282  
  2283  	// If this isn't simnet mode, then one of our additional bootstrapping
  2284  	// sources will be the set of running DNS seeds.
  2285  	if !s.cfg.Decred.SimNet {
  2286  		dnsSeeds, ok := chainreg.ChainDNSSeeds[s.cfg.ActiveNetParams.GenesisHash]
  2287  
  2288  		// If we have a set of DNS seeds for this chain, then we'll add
  2289  		// it as an additional bootstrapping source.
  2290  		if ok {
  2291  			srvrLog.Infof("Creating DNS peer bootstrapper with "+
  2292  				"seeds: %v", dnsSeeds)
  2293  
  2294  			dnsBootStrapper := discovery.NewDNSSeedBootstrapper(
  2295  				dnsSeeds, s.cfg.net, s.cfg.ConnectionTimeout,
  2296  			)
  2297  			bootStrappers = append(bootStrappers, dnsBootStrapper)
  2298  		}
  2299  	}
  2300  
  2301  	return bootStrappers, nil
  2302  }
  2303  
  2304  // createBootstrapIgnorePeers creates a map of peers that the bootstrap process
  2305  // needs to ignore, which is made of three parts,
  2306  //   - the node itself needs to be skipped as it doesn't make sense to connect
  2307  //     to itself.
  2308  //   - the peers that we already have connections with, as in s.peersByPub.
  2309  //   - the peers that we are attempting to connect to, as in s.persistentPeers.
  2310  func (s *server) createBootstrapIgnorePeers() map[autopilot.NodeID]struct{} {
  2311  	s.mu.RLock()
  2312  	defer s.mu.RUnlock()
  2313  
  2314  	ignore := make(map[autopilot.NodeID]struct{})
  2315  
  2316  	// We should ignore ourselves from bootstrapping.
  2317  	selfKey := autopilot.NewNodeID(s.identityECDH.PubKey())
  2318  	ignore[selfKey] = struct{}{}
  2319  
  2320  	// Ignore all connected peers.
  2321  	for _, peer := range s.peersByPub {
  2322  		nID := autopilot.NewNodeID(peer.IdentityKey())
  2323  		ignore[nID] = struct{}{}
  2324  	}
  2325  
  2326  	// Ignore all persistent peers as they have a dedicated reconnecting
  2327  	// process.
  2328  	for pubKeyStr := range s.persistentPeers {
  2329  		var nID autopilot.NodeID
  2330  		copy(nID[:], []byte(pubKeyStr))
  2331  		ignore[nID] = struct{}{}
  2332  	}
  2333  
  2334  	return ignore
  2335  }
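        // Illustrative sketch (not part of the original source): the ignore
        // map built above is handed to discovery.MultiSourceBootstrap so that
        // sampled addresses never include ourselves, already connected peers,
        // or peers we are currently trying to reach. This mirrors the calls
        // made in peerBootstrapper and initialPeerBootstrap below, where ctx,
        // numNeeded and bootstrappers come from the surrounding bootstrap
        // logic:
        //
        //	ignore := s.createBootstrapIgnorePeers()
        //	peerAddrs, err := discovery.MultiSourceBootstrap(
        //		ctx, ignore, numNeeded, bootstrappers...,
        //	)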
  2336  
  2337  // peerBootstrapper is a goroutine which is tasked with attempting to establish
  2338  // and maintain a target minimum number of outbound connections. With this
  2339  // invariant, we ensure that our node is connected to a diverse set of peers
  2340  // and that nodes newly joining the network receive an up to date network view
  2341  // as soon as possible.
  2342  func (s *server) peerBootstrapper(numTargetPeers uint32,
  2343  	bootstrappers []discovery.NetworkPeerBootstrapper) {
  2344  
  2345  	defer s.wg.Done()
  2346  
  2347  	// Create a context so that initial DNS bootstrapping can be canceled in
  2348  	// case of shutdown.
  2349  	//
  2350  	// This follows a different style for signalling shutdown than other
  2351  	// services of the node (which use Start/Stop calls). Ideally, bootstrapping
  2352  	// should be refactored to follow the same style.
  2353  	ctx, cancel := context.WithCancel(context.Background())
  2354  	go func() {
  2355  		// The main loop of peerBootstrapper() only returns once the node has
  2356  		// been signalled to close, so it's safe to call cancel() only then.
  2357  		<-s.quit
  2358  		cancel()
  2359  	}()
  2360  
  2361  	// Before we continue, init the ignore peers map.
  2362  	ignoreList := s.createBootstrapIgnorePeers()
  2363  
  2364  	// We'll start off by aggressively attempting connections to peers in
  2365  	// order to be a part of the network as soon as possible.
  2366  	s.initialPeerBootstrap(ctx, ignoreList, numTargetPeers, bootstrappers)
  2367  
  2368  	// Once done, we'll attempt to maintain our target minimum number of
  2369  	// peers.
  2370  	//
  2371  	// We'll use a 15 second backoff, and double the time every time an
  2372  	// epoch fails up to a ceiling.
  2373  	backOff := time.Second * 15
  2374  
  2375  	// We'll create a new ticker to wake us up every 15 seconds so we can
  2376  	// see if we've reached our minimum number of peers.
  2377  	sampleTicker := time.NewTicker(backOff)
  2378  	defer sampleTicker.Stop()
  2379  
  2380  	// We'll use the number of attempts and errors to determine if we need
  2381  	// to increase the time between discovery epochs.
  2382  	var epochErrors uint32 // To be used atomically.
  2383  	var epochAttempts uint32
  2384  
  2385  	for {
  2386  		select {
  2387  		// The ticker has just woken us up, so we'll need to check if
  2388  		// we need to attempt to connect out to any more peers.
  2389  		case <-sampleTicker.C:
  2390  			// Obtain the current number of peers, so we can gauge
  2391  			// if we need to sample more peers or not.
  2392  			s.mu.RLock()
  2393  			numActivePeers := uint32(len(s.peersByPub))
  2394  			s.mu.RUnlock()
  2395  
  2396  			// If we have enough peers, then we can loop back
  2397  			// around to the next round as we're done here.
  2398  			if numActivePeers >= numTargetPeers {
  2399  				continue
  2400  			}
  2401  
  2402  			// If all of our attempts failed during this last back
  2403  			// off period, then we'll increase our backoff to the 5
  2404  			// minute ceiling to avoid an excessive number of
  2405  			// queries.
  2406  			//
  2407  			// TODO(roasbeef): add reverse policy too?
  2408  
  2409  			if epochAttempts > 0 &&
  2410  				atomic.LoadUint32(&epochErrors) >= epochAttempts {
  2411  
  2412  				sampleTicker.Stop()
  2413  
  2414  				backOff *= 2
  2415  				if backOff > bootstrapBackOffCeiling {
  2416  					backOff = bootstrapBackOffCeiling
  2417  				}
  2418  
  2419  				// Reset the counters for the next epoch bump.
  2420  				epochAttempts = 0
  2421  				atomic.StoreUint32(&epochErrors, 0)
  2422  
  2423  				srvrLog.Debugf("Backing off peer bootstrapper to "+
  2424  					"%v", backOff)
  2425  				sampleTicker = time.NewTicker(backOff)
  2426  				continue
  2427  			}
  2428  
  2429  			atomic.StoreUint32(&epochErrors, 0)
  2430  			epochAttempts = 0
  2431  
  2432  			// Since we now need more peers, we'll compute the
  2433  			// exact number we need to reach our threshold.
  2434  			numNeeded := numTargetPeers - numActivePeers
  2435  
  2436  			srvrLog.Debugf("Attempting to obtain %v more network "+
  2437  				"peers", numNeeded)
  2438  
  2439  			// With the number of peers we need calculated, we'll
  2440  			// query the network bootstrappers to sample a set of
  2441  			// random addrs for us.
  2442  			//
  2443  			// Before we continue, get a copy of the ignore peers
  2444  			// map.
  2445  			ignoreList = s.createBootstrapIgnorePeers()
  2446  
  2447  			peerAddrs, err := discovery.MultiSourceBootstrap(
  2448  				ctx, ignoreList, numNeeded*2, bootstrappers...,
  2449  			)
  2450  			if err == discovery.ErrNoAddressesFound {
  2451  				srvrLog.Errorf("No addresses returned by " +
  2452  					"bootstrappers. Bumping attempt count.")
  2453  				epochAttempts++
  2454  				atomic.AddUint32(&epochErrors, 1)
  2455  				continue
  2456  			}
  2457  			if err != nil {
  2458  				srvrLog.Errorf("Unable to retrieve bootstrap "+
  2459  					"peers: %v", err)
  2460  				continue
  2461  			}
  2462  
  2463  			// Finally, we'll launch a new goroutine for each
  2464  			// prospective peer candidate.
  2465  			for _, addr := range peerAddrs {
  2466  				epochAttempts++
  2467  
  2468  				go func(a *lnwire.NetAddress) {
  2469  					// TODO(roasbeef): can do AS, subnet,
  2470  					// country diversity, etc
  2471  					errChan := make(chan error, 1)
  2472  					s.connectToPeer(
  2473  						a, errChan,
  2474  						s.cfg.ConnectionTimeout,
  2475  					)
  2476  					select {
  2477  					case err := <-errChan:
  2478  						if err == nil {
  2479  							return
  2480  						}
  2481  
  2482  						srvrLog.Errorf("Unable to "+
  2483  							"connect to %v: %v",
  2484  							a, err)
  2485  						atomic.AddUint32(&epochErrors, 1)
  2486  					case <-s.quit:
  2487  					}
  2488  				}(addr)
  2489  			}
  2490  		case <-s.quit:
  2491  			return
  2492  		}
  2493  	}
  2494  }
  2495  
  2496  // bootstrapBackOffCeiling is the maximum amount of time we'll wait between
  2497  // failed attempts to locate a set of bootstrap peers. We'll slowly double our
  2498  // query back off each time we encounter a failure.
  2499  const bootstrapBackOffCeiling = time.Minute * 5
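        // Illustrative note (not part of the original source): with the 15
        // second starting interval used by peerBootstrapper, a consistently
        // failing node backs off roughly as 15s, 30s, 1m, 2m, 4m and then
        // stays at this 5 minute ceiling. initialPeerBootstrap below applies
        // the same doubling pattern, but starts at 2s and caps at one fifth
        // of this ceiling (1 minute):
        //
        //	delayTime *= 2
        //	if delayTime > backOffCeiling {
        //		delayTime = backOffCeiling
        //	}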
  2500  
  2501  // initialPeerBootstrap attempts to continuously connect to peers on startup
  2502  // until the target number of peers has been reached. This ensures that nodes
  2503  // receive an up to date network view as soon as possible.
  2504  func (s *server) initialPeerBootstrap(ctx context.Context, ignore map[autopilot.NodeID]struct{},
  2505  	numTargetPeers uint32,
  2506  	bootstrappers []discovery.NetworkPeerBootstrapper) {
  2507  
  2508  	srvrLog.Debugf("Init bootstrap with targetPeers=%v, bootstrappers=%v, "+
  2509  		"ignore=%v", numTargetPeers, len(bootstrappers), len(ignore))
  2510  
  2511  	// We'll start off by waiting 2 seconds between failed attempts, then
  2512  	// double each time we fail until we hit the bootstrapBackOffCeiling.
  2513  	var delaySignal <-chan time.Time
  2514  	delayTime := time.Second * 2
  2515  
  2516  	// As we want to be more aggressive, we'll use a lower back off ceiling
  2517  	// than the main peer bootstrap logic.
  2518  	backOffCeiling := bootstrapBackOffCeiling / 5
  2519  
  2520  	for attempts := 0; ; attempts++ {
  2521  		// Check if the server has been requested to shut down in order
  2522  		// to prevent blocking.
  2523  		if s.Stopped() {
  2524  			return
  2525  		}
  2526  
  2527  		// We can exit our aggressive initial peer bootstrapping stage
  2528  		// if we've reached our target number of peers.
  2529  		s.mu.RLock()
  2530  		numActivePeers := uint32(len(s.peersByPub))
  2531  		s.mu.RUnlock()
  2532  
  2533  		if numActivePeers >= numTargetPeers {
  2534  			return
  2535  		}
  2536  
  2537  		if attempts > 0 {
  2538  			srvrLog.Debugf("Waiting %v before trying to locate "+
  2539  				"bootstrap peers (attempt #%v)", delayTime,
  2540  				attempts)
  2541  
  2542  			// We've completed at least one iteration and haven't
  2543  			// finished, so we'll start to insert a delay period
  2544  			// between each attempt.
  2545  			delaySignal = time.After(delayTime)
  2546  			select {
  2547  			case <-delaySignal:
  2548  			case <-s.quit:
  2549  				return
  2550  			}
  2551  
  2552  			// After our delay, we'll double the time we wait up to
  2553  			// the max back off period.
  2554  			delayTime *= 2
  2555  			if delayTime > backOffCeiling {
  2556  				delayTime = backOffCeiling
  2557  			}
  2558  		}
  2559  
  2560  		// Otherwise, we'll request the remaining number of peers
  2561  		// in order to reach our target.
  2562  		peersNeeded := numTargetPeers - numActivePeers
  2563  		bootstrapAddrs, err := discovery.MultiSourceBootstrap(
  2564  			ctx, ignore, peersNeeded, bootstrappers...,
  2565  		)
  2566  		if err == discovery.ErrNoAddressesFound {
  2567  			srvrLog.Errorf("No addresses returned by initial " +
  2568  				"bootstrappers. Disabling bootstrapping.")
  2569  			return
  2570  		}
  2571  		if err != nil {
  2572  			srvrLog.Errorf("Unable to retrieve initial bootstrap "+
  2573  				"peers: %v", err)
  2574  			continue
  2575  		}
  2576  
  2577  		// Then, we'll attempt to establish a connection to the
  2578  		// different peer addresses retrieved by our bootstrappers.
  2579  		var wg sync.WaitGroup
  2580  		for _, bootstrapAddr := range bootstrapAddrs {
  2581  			wg.Add(1)
  2582  			go func(addr *lnwire.NetAddress) {
  2583  				defer wg.Done()
  2584  
  2585  				errChan := make(chan error, 1)
  2586  				go s.connectToPeer(
  2587  					addr, errChan, s.cfg.ConnectionTimeout,
  2588  				)
  2589  
  2590  				// We'll only allow this connection attempt to
  2591  				// take up to 3 seconds. This allows us to move
  2592  				// quickly by discarding peers that are slowing
  2593  				// us down.
  2594  				select {
  2595  				case err := <-errChan:
  2596  					if err == nil {
  2597  						return
  2598  					}
  2599  					srvrLog.Errorf("Unable to connect to "+
  2600  						"%v: %v", addr, err)
  2601  				// TODO: tune timeout? 3 seconds might be *too*
  2602  				// aggressive but works well.
  2603  				case <-time.After(3 * time.Second):
  2604  					srvrLog.Tracef("Skipping peer %v due "+
  2605  						"to not establishing a "+
  2606  						"connection within 3 seconds",
  2607  						addr)
  2608  				case <-s.quit:
  2609  				}
  2610  			}(bootstrapAddr)
  2611  		}
  2612  
  2613  		wg.Wait()
  2614  	}
  2615  }
  2616  
  2617  // createNewHiddenService automatically sets up a v2 or v3 onion service in
  2618  // order to listen for inbound connections over Tor.
  2619  func (s *server) createNewHiddenService() error {
  2620  	// Determine the different ports the server is listening on. The onion
  2621  	// service's virtual port will map to these ports and one will be picked
  2622  	// at random when the onion service is being accessed.
  2623  	listenPorts := make([]int, 0, len(s.listenAddrs))
  2624  	for _, listenAddr := range s.listenAddrs {
  2625  		port := listenAddr.(*net.TCPAddr).Port
  2626  		listenPorts = append(listenPorts, port)
  2627  	}
  2628  
  2629  	// Once the port mapping has been set, we can go ahead and automatically
  2630  	// create our onion service. The service's private key will be saved to
  2631  	// disk in order to regain access to this service when restarting `lnd`.
  2632  	onionCfg := tor.AddOnionConfig{
  2633  		VirtualPort: defaultPeerPort,
  2634  		TargetPorts: listenPorts,
  2635  		Store:       tor.NewOnionFile(s.cfg.Tor.PrivateKeyPath, 0600),
  2636  	}
  2637  
  2638  	switch {
  2639  	case s.cfg.Tor.V2:
  2640  		onionCfg.Type = tor.V2
  2641  	case s.cfg.Tor.V3:
  2642  		onionCfg.Type = tor.V3
  2643  	}
  2644  
  2645  	addr, err := s.torController.AddOnion(onionCfg)
  2646  	if err != nil {
  2647  		return err
  2648  	}
  2649  
  2650  	// Now that the onion service has been created, we'll add the onion
  2651  	// address it can be reached at to our list of advertised addresses.
  2652  	newNodeAnn, err := s.genNodeAnnouncement(
  2653  		true, func(currentAnn *lnwire.NodeAnnouncement) {
  2654  			currentAnn.Addresses = append(currentAnn.Addresses, addr)
  2655  		},
  2656  	)
  2657  	if err != nil {
  2658  		return fmt.Errorf("unable to generate new node "+
  2659  			"announcement: %v", err)
  2660  	}
  2661  
  2662  	// Finally, we'll update the on-disk version of our announcement so it
  2663  	// will eventually propagate to nodes in the network.
  2664  	selfNode := &channeldb.LightningNode{
  2665  		HaveNodeAnnouncement: true,
  2666  		LastUpdate:           time.Unix(int64(newNodeAnn.Timestamp), 0),
  2667  		Addresses:            newNodeAnn.Addresses,
  2668  		Alias:                newNodeAnn.Alias.String(),
  2669  		Features: lnwire.NewFeatureVector(
  2670  			newNodeAnn.Features, lnwire.Features,
  2671  		),
  2672  		Color:        newNodeAnn.RGBColor,
  2673  		AuthSigBytes: newNodeAnn.Signature.ToSignatureBytes(),
  2674  	}
  2675  	copy(selfNode.PubKeyBytes[:], s.identityECDH.PubKey().SerializeCompressed())
  2676  	if err := s.graphDB.SetSourceNode(selfNode); err != nil {
  2677  		return fmt.Errorf("can't set self node: %v", err)
  2678  	}
  2679  
  2680  	return nil
  2681  }
  2682  
  2683  // genNodeAnnouncement generates and returns the current fully signed node
  2684  // announcement. If refresh is true, then the time stamp of the announcement
  2685  // will be updated in order to ensure it propagates through the network.
  2686  func (s *server) genNodeAnnouncement(refresh bool,
  2687  	modifiers ...netann.NodeAnnModifier) (lnwire.NodeAnnouncement, error) {
  2688  
  2689  	s.mu.Lock()
  2690  	defer s.mu.Unlock()
  2691  
  2692  	// If we don't need to refresh the announcement, then we can return a
  2693  	// copy of our cached version.
  2694  	if !refresh {
  2695  		return *s.currentNodeAnn, nil
  2696  	}
  2697  
  2698  	// Always update the timestamp when refreshing to ensure the update
  2699  	// propagates.
  2700  	modifiers = append(modifiers, netann.NodeAnnSetTimestamp)
  2701  
  2702  	// Otherwise, we'll sign a new update after applying all of the passed
  2703  	// modifiers.
  2704  	err := netann.SignNodeAnnouncement(
  2705  		s.nodeSigner, s.identityKeyLoc, s.currentNodeAnn,
  2706  		modifiers...,
  2707  	)
  2708  	if err != nil {
  2709  		return lnwire.NodeAnnouncement{}, err
  2710  	}
  2711  
  2712  	return *s.currentNodeAnn, nil
  2713  }
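        // Illustrative sketch (not part of the original source): callers
        // refresh the announcement by passing refresh=true together with
        // optional modifiers, as watchExternalIP and createNewHiddenService
        // do elsewhere in this file. newAddrs is assumed to be a []net.Addr
        // prepared by the caller.
        //
        //	newAnn, err := s.genNodeAnnouncement(
        //		true, netann.NodeAnnSetAddrs(newAddrs),
        //	)
        //	if err == nil {
        //		err = s.BroadcastMessage(nil, &newAnn)
        //	}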
  2714  
  2715  type nodeAddresses struct {
  2716  	pubKey    *secp256k1.PublicKey
  2717  	addresses []net.Addr
  2718  }
  2719  
  2720  // establishPersistentConnections attempts to establish persistent connections
  2721  // to all our direct channel collaborators. In order to promote liveness of our
  2722  // active channels, we instruct the connection manager to attempt to establish
  2723  // and maintain persistent connections to all our direct channel counterparties.
  2724  func (s *server) establishPersistentConnections() error {
  2725  	// nodeAddrsMap stores the combination of node public keys and addresses
  2726  	// that we'll attempt to reconnect to. PubKey strings are used as keys
  2727  	// since other PubKey forms can't be compared.
  2728  	nodeAddrsMap := map[string]*nodeAddresses{}
  2729  
  2730  	srvrLog.Infof("Trying to establish persistent connections")
  2731  
  2732  	// Iterate through the list of LinkNodes to find addresses we should
  2733  	// attempt to connect to based on our set of previous connections. Set
  2734  	// the reconnection port to the default peer port.
  2735  	linkNodes, err := s.chanStateDB.LinkNodeDB().FetchAllLinkNodes()
  2736  	if err != nil && err != channeldb.ErrLinkNodesNotFound {
  2737  		return err
  2738  	}
  2739  	for _, node := range linkNodes {
  2740  		pubStr := string(node.IdentityPub.SerializeCompressed())
  2741  		nodeAddrs := &nodeAddresses{
  2742  			pubKey:    node.IdentityPub,
  2743  			addresses: node.Addresses,
  2744  		}
  2745  		nodeAddrsMap[pubStr] = nodeAddrs
  2746  	}
  2747  
  2748  	// After checking our previous connections for addresses to connect to,
  2749  	// iterate through the nodes in our channel graph to find addresses
  2750  	// that have been added via NodeAnnouncement messages.
  2751  	sourceNode, err := s.graphDB.SourceNode()
  2752  	if err != nil {
  2753  		return err
  2754  	}
  2755  
  2756  	// TODO(roasbeef): instead iterate over link nodes and query graph for
  2757  	// each of the nodes.
  2758  	selfPub := s.identityECDH.PubKey().SerializeCompressed()
  2759  	err = sourceNode.ForEachChannel(nil, func(
  2760  		tx kvdb.RTx,
  2761  		chanInfo *channeldb.ChannelEdgeInfo,
  2762  		policy, _ *channeldb.ChannelEdgePolicy) error {
  2763  
  2764  		// If the remote party has announced the channel to us, but we
  2765  		// haven't yet, then we won't have a policy. However, we don't
  2766  		// need this to connect to the peer, so we'll log it and move on.
  2767  		if policy == nil {
  2768  			srvrLog.Warnf("No channel policy found for "+
  2769  				"ChannelPoint(%v)", chanInfo.ChannelPoint)
  2770  		}
  2771  
  2772  		// We'll now fetch the peer opposite from us within this
  2773  		// channel so we can queue up a direct connection to them.
  2774  		channelPeer, err := chanInfo.FetchOtherNode(tx, selfPub)
  2775  		if err != nil {
  2776  			return fmt.Errorf("unable to fetch channel peer for "+
  2777  				"ChannelPoint(%v): %v", chanInfo.ChannelPoint,
  2778  				err)
  2779  		}
  2780  
  2781  		pubStr := string(channelPeer.PubKeyBytes[:])
  2782  
  2783  		// Add all unique addresses from channel
  2784  		// graph/NodeAnnouncements to the list of addresses we'll
  2785  		// connect to for this peer.
  2786  		addrSet := make(map[string]net.Addr)
  2787  		for _, addr := range channelPeer.Addresses {
  2788  			switch addr.(type) {
  2789  			case *net.TCPAddr:
  2790  				addrSet[addr.String()] = addr
  2791  
  2792  			// We'll only attempt to connect to Tor addresses if Tor
  2793  			// outbound support is enabled.
  2794  			case *tor.OnionAddr:
  2795  				if s.cfg.Tor.Active {
  2796  					addrSet[addr.String()] = addr
  2797  				}
  2798  			}
  2799  		}
  2800  
  2801  		// If this peer is also recorded as a link node, we'll add any
  2802  		// additional addresses that have not already been selected.
  2803  		linkNodeAddrs, ok := nodeAddrsMap[pubStr]
  2804  		if ok {
  2805  			for _, lnAddress := range linkNodeAddrs.addresses {
  2806  				switch lnAddress.(type) {
  2807  				case *net.TCPAddr:
  2808  					addrSet[lnAddress.String()] = lnAddress
  2809  
  2810  				// We'll only attempt to connect to Tor
  2811  				// addresses if Tor outbound support is enabled.
  2812  				case *tor.OnionAddr:
  2813  					if s.cfg.Tor.Active {
  2814  						addrSet[lnAddress.String()] = lnAddress
  2815  					}
  2816  				}
  2817  			}
  2818  		}
  2819  
  2820  		// Construct a slice of the deduped addresses.
  2821  		var addrs []net.Addr
  2822  		for _, addr := range addrSet {
  2823  			addrs = append(addrs, addr)
  2824  		}
  2825  
  2826  		n := &nodeAddresses{
  2827  			addresses: addrs,
  2828  		}
  2829  		n.pubKey, err = channelPeer.PubKey()
  2830  		if err != nil {
  2831  			return err
  2832  		}
  2833  
  2834  		nodeAddrsMap[pubStr] = n
  2835  		return nil
  2836  	})
  2837  	if err != nil && err != channeldb.ErrGraphNoEdgesFound {
  2838  		return err
  2839  	}
  2840  
  2841  	srvrLog.Debugf("Establishing %v persistent connections on start",
  2842  		len(nodeAddrsMap))
  2843  
  2844  	// Acquire and hold server lock until all persistent connection requests
  2845  	// have been recorded and sent to the connection manager.
  2846  	s.mu.Lock()
  2847  	defer s.mu.Unlock()
  2848  
  2849  	// Iterate through the combined list of addresses from prior links and
  2850  	// node announcements and attempt to reconnect to each node.
  2851  	var numOutboundConns int
  2852  	for pubStr, nodeAddr := range nodeAddrsMap {
  2853  		// Add this peer to the set of peers we should maintain a
  2854  		// persistent connection with. We set the value to false to
  2855  		// indicate that we should not continue to reconnect if the
  2856  		// number of channels returns to zero, since this peer has not
  2857  		// been requested as perm by the user.
  2858  		s.persistentPeers[pubStr] = false
  2859  		if _, ok := s.persistentPeersBackoff[pubStr]; !ok {
  2860  			s.persistentPeersBackoff[pubStr] = s.cfg.MinBackoff
  2861  		}
  2862  
  2863  		// We might have been contacted by this peer at this point, so
  2864  		// check that and ignore if we already have a connection.
  2865  		peer, err := s.findPeerByPubStr(pubStr)
  2866  		if err == nil {
  2867  			srvrLog.Debugf("Skipping already connected persistent "+
  2868  				"peer %x@%s", pubStr, peer)
  2869  			continue
  2870  		}
  2871  
  2872  		for _, address := range nodeAddr.addresses {
  2873  			// Create a wrapper address which couples the IP and
  2874  			// the pubkey so the brontide authenticated connection
  2875  			// can be established.
  2876  			lnAddr := &lnwire.NetAddress{
  2877  				IdentityKey: nodeAddr.pubKey,
  2878  				Address:     address,
  2879  			}
  2880  
  2881  			s.persistentPeerAddrs[pubStr] = append(
  2882  				s.persistentPeerAddrs[pubStr], lnAddr)
  2883  		}
  2884  
  2885  		// We'll connect to the first 10 peers immediately, then
  2886  		// randomly stagger any remaining connections if the
  2887  		// stagger initial reconnect flag is set. This ensures
  2888  		// that mobile nodes or nodes with a small number of
  2889  		// channels obtain connectivity quickly, but larger
  2890  		// nodes are able to disperse the costs of connecting to
  2891  		// all peers at once.
  2892  		if numOutboundConns < numInstantInitReconnect ||
  2893  			!s.cfg.StaggerInitialReconnect {
  2894  
  2895  			go s.connectToPersistentPeer(pubStr)
  2896  		} else {
  2897  			go s.delayInitialReconnect(pubStr)
  2898  		}
  2899  
  2900  		numOutboundConns++
  2901  	}
  2902  
  2903  	return nil
  2904  }
  2905  
  2906  // delayInitialReconnect will attempt a reconnection to the given peer after
  2907  // sampling a value for the delay between 0s and the maxInitReconnectDelay.
  2908  //
  2909  // NOTE: This method MUST be run as a goroutine.
  2910  func (s *server) delayInitialReconnect(pubStr string) {
  2911  	delay := time.Duration(prand.Intn(maxInitReconnectDelay)) * time.Second
  2912  	select {
  2913  	case <-time.After(delay):
  2914  		s.connectToPersistentPeer(pubStr)
  2915  	case <-s.quit:
  2916  	}
  2917  }
  2918  
  2919  // prunePersistentPeerConnection removes all internal state related to
  2920  // persistent connections to a peer within the server. This is used to avoid
  2921  // persistent connection retries to peers we do not have any open channels with.
  2922  func (s *server) prunePersistentPeerConnection(compressedPubKey [33]byte) {
  2923  	pubKeyStr := string(compressedPubKey[:])
  2924  
  2925  	s.mu.Lock()
  2926  	if perm, ok := s.persistentPeers[pubKeyStr]; ok && !perm {
  2927  		delete(s.persistentPeers, pubKeyStr)
  2928  		delete(s.persistentPeersBackoff, pubKeyStr)
  2929  		delete(s.persistentPeerAddrs, pubKeyStr)
  2930  		s.cancelConnReqs(pubKeyStr, nil)
  2931  		s.mu.Unlock()
  2932  
  2933  		srvrLog.Infof("Pruned peer %x from persistent connections, "+
  2934  			"peer has no open channels", compressedPubKey)
  2935  
  2936  		return
  2937  	}
  2938  	s.mu.Unlock()
  2939  }
  2940  
  2941  // BroadcastMessage sends a request to the server to broadcast a set of
  2942  // messages to all peers other than the one specified by the `skips` parameter.
  2943  // All messages sent via BroadcastMessage will be queued for lazy delivery to
  2944  // the target peers.
  2945  //
  2946  // NOTE: This function is safe for concurrent access.
  2947  func (s *server) BroadcastMessage(skips map[route.Vertex]struct{},
  2948  	msgs ...lnwire.Message) error {
  2949  
  2950  	// Filter out peers found in the skips map. We synchronize access to
  2951  	// peersByPub throughout this process to ensure we deliver messages to
  2952  	// the exact set of peers present at the time of invocation.
  2953  	s.mu.RLock()
  2954  	peers := make([]*peer.Brontide, 0, len(s.peersByPub))
  2955  	for _, sPeer := range s.peersByPub {
  2956  		if skips != nil {
  2957  			if _, ok := skips[sPeer.PubKey()]; ok {
  2958  				srvrLog.Debugf("Skipping %x in broadcast",
  2959  					sPeer.PubKey())
  2960  				continue
  2961  			}
  2962  		}
  2963  
  2964  		peers = append(peers, sPeer)
  2965  	}
  2966  	s.mu.RUnlock()
  2967  
  2968  	srvrLog.Debugf("Broadcasting %v messages to %d peers with %d in skipList",
  2969  		len(msgs), len(peers), len(skips))
  2970  
  2971  	// Iterate over all known peers, dispatching a go routine to enqueue
  2972  	// all messages to each of them.
  2973  	var wg sync.WaitGroup
  2974  	for _, sPeer := range peers {
  2975  		srvrLog.Debugf("Sending messages [%s] to peer %s",
  2976  			lnwire.MessagesTypesLogger(msgs), sPeer)
  2977  
  2978  		// Dispatch a go routine to enqueue all messages to this peer.
  2979  		wg.Add(1)
  2980  		s.wg.Add(1)
  2981  		go func(p lnpeer.Peer) {
  2982  			defer s.wg.Done()
  2983  			defer wg.Done()
  2984  
  2985  			p.SendMessageLazy(false, msgs...)
  2986  		}(sPeer)
  2987  	}
  2988  
  2989  	// Wait for all messages to have been dispatched before returning to
  2990  	// caller.
  2991  	wg.Wait()
  2992  
  2993  	return nil
  2994  }
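
// A minimal usage sketch (hypothetical caller): broadcast a batch of gossip
// messages to every peer except the one they originated from. Here srcVertex
// and updates are placeholders assumed to be supplied by the caller.
//
//	skips := map[route.Vertex]struct{}{
//		srcVertex: {},
//	}
//	if err := s.BroadcastMessage(skips, updates...); err != nil {
//		srvrLog.Errorf("unable to broadcast: %v", err)
//	}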
  2995  
  2996  // NotifyWhenOnline can be called by other subsystems to get notified when a
  2997  // particular peer comes online. The peer itself is sent across the peerChan.
  2998  //
  2999  // NOTE: This function is safe for concurrent access.
  3000  func (s *server) NotifyWhenOnline(peerKey [33]byte,
  3001  	peerChan chan<- lnpeer.Peer) {
  3002  
  3003  	s.mu.Lock()
  3004  	defer s.mu.Unlock()
  3005  
  3006  	// Compute the target peer's identifier.
  3007  	pubStr := string(peerKey[:])
  3008  
  3009  	// Check if peer is connected.
  3010  	peer, ok := s.peersByPub[pubStr]
  3011  	if ok {
  3012  		// Connected, can return early.
  3013  		srvrLog.Debugf("Notifying that peer %x is online", peerKey)
  3014  
  3015  		select {
  3016  		case peerChan <- peer:
  3017  		case <-s.quit:
  3018  		}
  3019  
  3020  		return
  3021  	}
  3022  
  3023  	// Not connected, store this listener such that it can be notified when
  3024  	// the peer comes online.
  3025  	s.peerConnectedListeners[pubStr] = append(
  3026  		s.peerConnectedListeners[pubStr], peerChan,
  3027  	)
  3028  }
  3029  
  3030  // NotifyWhenOffline delivers a notification to the caller of when the peer with
  3031  // the given public key has been disconnected. The notification is signaled by
  3032  // closing the channel returned.
  3033  func (s *server) NotifyWhenOffline(peerPubKey [33]byte) <-chan struct{} {
  3034  	s.mu.Lock()
  3035  	defer s.mu.Unlock()
  3036  
  3037  	c := make(chan struct{})
  3038  
  3039  	// If the peer is already offline, we can immediately trigger the
  3040  	// notification.
  3041  	peerPubKeyStr := string(peerPubKey[:])
  3042  	if _, ok := s.peersByPub[peerPubKeyStr]; !ok {
  3043  		srvrLog.Debugf("Notifying that peer %x is offline", peerPubKey)
  3044  		close(c)
  3045  		return c
  3046  	}
  3047  
  3048  	// Otherwise, the peer is online, so we'll keep track of the channel to
  3049  	// trigger the notification once the server detects the peer
  3050  	// disconnects.
  3051  	s.peerDisconnectedListeners[peerPubKeyStr] = append(
  3052  		s.peerDisconnectedListeners[peerPubKeyStr], c,
  3053  	)
  3054  
  3055  	return c
  3056  }
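
// A minimal usage sketch (hypothetical caller): wait until a peer identified
// by its 33-byte compressed public key comes online, then watch for it going
// offline again. peerKey is a placeholder assumed to be supplied by the
// caller.
//
//	peerChan := make(chan lnpeer.Peer, 1)
//	s.NotifyWhenOnline(peerKey, peerChan)
//
//	select {
//	case p := <-peerChan:
//		srvrLog.Debugf("Peer %v is online", p)
//	case <-s.quit:
//		return
//	}
//
//	offline := s.NotifyWhenOffline(peerKey)
//	select {
//	case <-offline:
//		srvrLog.Debugf("Peer %x went offline", peerKey)
//	case <-s.quit:
//	}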
  3057  
  3058  // FindPeer will return the peer that corresponds to the passed in public key.
  3059  // This function is used by the funding manager, allowing it to update the
  3060  // daemon's local representation of the remote peer.
  3061  //
  3062  // NOTE: This function is safe for concurrent access.
  3063  func (s *server) FindPeer(peerKey *secp256k1.PublicKey) (*peer.Brontide, error) {
  3064  	s.mu.RLock()
  3065  	defer s.mu.RUnlock()
  3066  
  3067  	pubStr := string(peerKey.SerializeCompressed())
  3068  
  3069  	return s.findPeerByPubStr(pubStr)
  3070  }
  3071  
  3072  // FindPeerByPubStr will return the peer that corresponds to the passed peerID,
  3073  // which should be a string representation of the peer's serialized, compressed
  3074  // public key.
  3075  //
  3076  // NOTE: This function is safe for concurrent access.
  3077  func (s *server) FindPeerByPubStr(pubStr string) (*peer.Brontide, error) {
  3078  	s.mu.RLock()
  3079  	defer s.mu.RUnlock()
  3080  
  3081  	return s.findPeerByPubStr(pubStr)
  3082  }
  3083  
  3084  // findPeerByPubStr is an internal method that retrieves the specified peer
  3085  // from the server's internal state using the peer's public key string.
  3086  func (s *server) findPeerByPubStr(pubStr string) (*peer.Brontide, error) {
  3087  	peer, ok := s.peersByPub[pubStr]
  3088  	if !ok {
  3089  		return nil, ErrPeerNotConnected
  3090  	}
  3091  
  3092  	return peer, nil
  3093  }
  3094  
  3095  // nextPeerBackoff computes the next backoff duration for a peer's pubkey using
  3096  // exponential backoff. If no previous backoff was known, the default is
  3097  // returned.
  3098  func (s *server) nextPeerBackoff(pubStr string,
  3099  	startTime time.Time) time.Duration {
  3100  
  3101  	// Now, determine the appropriate backoff to use for the retry.
  3102  	backoff, ok := s.persistentPeersBackoff[pubStr]
  3103  	if !ok {
  3104  		// If an existing backoff was unknown, use the default.
  3105  		return s.cfg.MinBackoff
  3106  	}
  3107  
  3108  	// If the peer failed to start properly, we'll just use the previous
  3109  	// backoff to compute the subsequent randomized exponential backoff
  3110  	// duration. This will roughly double on average.
  3111  	if startTime.IsZero() {
  3112  		return computeNextBackoff(backoff, s.cfg.MaxBackoff)
  3113  	}
  3114  
  3115  	// The peer succeeded in starting. If the connection didn't last long
  3116  	// enough to be considered stable, we'll continue to back off retries
  3117  	// with this peer.
  3118  	connDuration := time.Since(startTime)
  3119  	if connDuration < defaultStableConnDuration {
  3120  		return computeNextBackoff(backoff, s.cfg.MaxBackoff)
  3121  	}
  3122  
  3123  	// The peer succeeded in starting and the connection was stable, so we'll
  3124  	// reduce the timeout duration by the length of the connection after
  3125  	// applying randomized exponential backoff. We'll only apply this in the
  3126  	// case that:
  3127  	//   reb(curBackoff) - connDuration > cfg.MinBackoff
  3128  	relaxedBackoff := computeNextBackoff(backoff, s.cfg.MaxBackoff) - connDuration
  3129  	if relaxedBackoff > s.cfg.MinBackoff {
  3130  		return relaxedBackoff
  3131  	}
  3132  
  3133  	// Lastly, if reb(curBackoff) - connDuration <= cfg.MinBackoff, then the
  3134  	// stable connection lasted much longer than our previous backoff. To
  3135  	// reward such good behavior, we'll reconnect after the default
  3136  	// timeout.
  3137  	return s.cfg.MinBackoff
  3138  }
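
// As a concrete illustration of nextPeerBackoff (values chosen purely for the
// example): with MinBackoff = 1s, MaxBackoff = 1h and a stored backoff of 8m,
// a connection that survived 25m gives reb(8m) ~= 16m and 16m - 25m < 1s, so
// the next attempt is scheduled after the 1s MinBackoff. Had the connection
// lasted only 5m (below defaultStableConnDuration), the backoff would instead
// grow to roughly 16m.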
  3139  
  3140  // shouldDropLocalConnection determines if our local connection to a remote peer
  3141  // should be dropped in the case of concurrent connection establishment. In
  3142  // order to deterministically decide which connection should be dropped, we'll
  3143  // utilize the ordering of the local and remote public key. If we didn't use
  3144  // such a tie breaker, then we risk _both_ connections erroneously being
  3145  // dropped.
  3146  func shouldDropLocalConnection(local, remote *secp256k1.PublicKey) bool {
  3147  	localPubBytes := local.SerializeCompressed()
  3148  	remotePubBytes := remote.SerializeCompressed()
  3149  
  3150  	// The connection that comes from the node with a "smaller" pubkey
  3151  	// should be kept. Therefore, if our pubkey is "greater" than theirs, we
  3152  	// should drop our established connection.
  3153  	return bytes.Compare(localPubBytes, remotePubBytes) > 0
  3154  }
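
// An illustrative sketch (hypothetical keys): both sides run the same
// comparison on the serialized compressed pubkeys, so they reach opposite
// verdicts and exactly one of the duplicate connections survives. If our key
// serializes to 0x03ff... and theirs to 0x02aa..., then locally
// shouldDropLocalConnection(ours, theirs) is true, while the remote node
// evaluates shouldDropLocalConnection(theirs, ours) as false and keeps its
// connection.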
  3155  
  3156  // InboundPeerConnected initializes a new peer in response to a new inbound
  3157  // connection.
  3158  //
  3159  // NOTE: This function is safe for concurrent access.
  3160  func (s *server) InboundPeerConnected(conn net.Conn) {
  3161  	// Exit early if we have already been instructed to shut down; this
  3162  	// prevents any delayed callbacks from accidentally registering peers.
  3163  	if s.Stopped() {
  3164  		return
  3165  	}
  3166  
  3167  	nodePub := conn.(*brontide.Conn).RemotePub()
  3168  	pubStr := string(nodePub.SerializeCompressed())
  3169  
  3170  	s.mu.Lock()
  3171  	defer s.mu.Unlock()
  3172  
  3173  	// If we already have an inbound connection to this peer, then ignore
  3174  	// this new, duplicate connection (keep the older one).
  3175  	if _, ok := s.inboundPeers[pubStr]; ok {
  3176  		srvrLog.Debugf("Already have inbound connection for %x, "+
  3177  			"ignoring new inbound connection",
  3178  			nodePub.SerializeCompressed())
  3179  
  3180  		conn.Close()
  3181  		return
  3182  	}
  3183  
  3184  	// If we already have a valid connection that is scheduled to take
  3185  	// precedence once the prior peer has finished disconnecting, we'll
  3186  	// ignore this connection.
  3187  	if p, ok := s.scheduledPeerConnection[pubStr]; ok {
  3188  		srvrLog.Debugf("Ignoring connection from %v, peer %v already "+
  3189  			"scheduled", conn.RemoteAddr(), p)
  3190  		conn.Close()
  3191  		return
  3192  	}
  3193  
  3194  	srvrLog.Infof("New inbound connection from %v", conn.RemoteAddr())
  3195  
  3196  	// Check to see if we already have a connection with this peer. If so,
  3197  	// we may need to drop our existing connection. This prevents us from
  3198  	// having duplicate connections to the same peer. We forgo adding a
  3199  	// default case as we expect these to be the only error values returned
  3200  	// from findPeerByPubStr.
  3201  	connectedPeer, err := s.findPeerByPubStr(pubStr)
  3202  	switch err {
  3203  	case ErrPeerNotConnected:
  3204  		// We were unable to locate an existing connection with the
  3205  		// target peer, proceed to connect.
  3206  		s.cancelConnReqs(pubStr, nil)
  3207  		s.peerConnected(conn, nil, true)
  3208  
  3209  	case nil:
  3210  		// We already have a connection with the incoming peer. If the
  3211  		// connection we've already established should be kept, then
  3212  		// we'll close out this connection s.t there's only a single
  3213  		// we'll close out this connection so that there's only a single
  3214  		localPub := s.identityECDH.PubKey()
  3215  		if !shouldDropLocalConnection(localPub, nodePub) {
  3216  			srvrLog.Warnf("Received inbound connection from peer %v, "+
  3217  				"but already connected, dropping conn",
  3218  				connectedPeer)
  3219  			conn.Close()
  3220  			return
  3221  		}
  3222  
  3223  		// Otherwise, if we should drop the connection, then we'll
  3224  		// disconnect our already connected peer.
  3225  		srvrLog.Debugf("Disconnecting stale connection to %v",
  3226  			connectedPeer)
  3227  
  3228  		s.cancelConnReqs(pubStr, nil)
  3229  
  3230  		// Remove the current peer from the server's internal state and
  3231  		// signal that the peer termination watcher does not need to
  3232  		// execute for this peer.
  3233  		s.removePeer(connectedPeer)
  3234  		s.ignorePeerTermination[connectedPeer] = struct{}{}
  3235  		s.scheduledPeerConnection[pubStr] = func() {
  3236  			s.peerConnected(conn, nil, true)
  3237  		}
  3238  	}
  3239  }
  3240  
  3241  // OutboundPeerConnected initializes a new peer in response to a new outbound
  3242  // connection.
  3243  // NOTE: This function is safe for concurrent access.
  3244  func (s *server) OutboundPeerConnected(connReq *connmgr.ConnReq, conn net.Conn) {
  3245  	// Exit early if we have already been instructed to shut down; this
  3246  	// prevents any delayed callbacks from accidentally registering peers.
  3247  	if s.Stopped() {
  3248  		return
  3249  	}
  3250  
  3251  	nodePub := conn.(*brontide.Conn).RemotePub()
  3252  	pubStr := string(nodePub.SerializeCompressed())
  3253  
  3254  	s.mu.Lock()
  3255  	defer s.mu.Unlock()
  3256  
  3257  	// If we already have an outbound connection to this peer, then ignore
  3258  	// this new (duplicated) connection.
  3259  	if _, ok := s.outboundPeers[pubStr]; ok {
  3260  		srvrLog.Debugf("Already have outbound connection for %x, "+
  3261  			"ignoring new (duplicated) connection",
  3262  			nodePub.SerializeCompressed())
  3263  
  3264  		if connReq != nil {
  3265  			s.connMgr.Remove(connReq.ID())
  3266  		}
  3267  		conn.Close()
  3268  		return
  3269  	}
  3270  	if _, ok := s.persistentConnReqs[pubStr]; !ok && connReq != nil {
  3271  		srvrLog.Debugf("Ignoring canceled outbound connection")
  3272  		s.connMgr.Remove(connReq.ID())
  3273  		conn.Close()
  3274  		return
  3275  	}
  3276  
  3277  	// If we already have a valid connection that is scheduled to take
  3278  	// precedence once the prior peer has finished disconnecting, we'll
  3279  	// ignore this connection.
  3280  	if _, ok := s.scheduledPeerConnection[pubStr]; ok {
  3281  		srvrLog.Debugf("Ignoring connection, peer already scheduled")
  3282  
  3283  		if connReq != nil {
  3284  			s.connMgr.Remove(connReq.ID())
  3285  		}
  3286  
  3287  		conn.Close()
  3288  		return
  3289  	}
  3290  
  3291  	srvrLog.Infof("Established connection to: %x@%v", pubStr,
  3292  		conn.RemoteAddr())
  3293  
  3294  	if connReq != nil {
  3295  		// A successful connection was returned by the connmgr.
  3296  		// Immediately cancel all pending requests, excluding the
  3297  		// outbound connection we just established.
  3298  		ignore := connReq.ID()
  3299  		s.cancelConnReqs(pubStr, &ignore)
  3300  	} else {
  3301  		// This was a successful connection made by some other
  3302  		// subsystem. Remove all requests being managed by the connmgr.
  3303  		s.cancelConnReqs(pubStr, nil)
  3304  	}
  3305  
  3306  	// If we already have a connection with this peer, decide whether or not
  3307  	// we need to drop the stale connection. We forgo adding a default case
  3308  	// as we expect these to be the only error values returned from
  3309  	// findPeerByPubStr.
  3310  	connectedPeer, err := s.findPeerByPubStr(pubStr)
  3311  	switch err {
  3312  	case ErrPeerNotConnected:
  3313  		// We were unable to locate an existing connection with the
  3314  		// target peer, proceed to connect.
  3315  		s.peerConnected(conn, connReq, false)
  3316  
  3317  	case nil:
  3318  		// We already have a connection open with the target peer.
  3319  		// If our (this) connection should be dropped, then we'll do
  3320  		// so, in order to ensure we don't have any duplicate
  3321  		// connections.
  3322  		localPub := s.identityECDH.PubKey()
  3323  		if shouldDropLocalConnection(localPub, nodePub) {
  3324  			srvrLog.Warnf("Established outbound connection to peer %v, "+
  3325  				"but already connected, dropping conn",
  3326  				connectedPeer)
  3327  			if connReq != nil {
  3328  				s.connMgr.Remove(connReq.ID())
  3329  			}
  3330  			conn.Close()
  3331  			return
  3332  		}
  3333  
  3334  		// Otherwise, _their_ connection should be dropped. So we'll
  3335  		// disconnect the peer and send the now obsolete peer to the
  3336  		// server for garbage collection.
  3337  		srvrLog.Debugf("Disconnecting stale connection to %v",
  3338  			connectedPeer)
  3339  
  3340  		// Remove the current peer from the server's internal state and
  3341  		// signal that the peer termination watcher does not need to
  3342  		// execute for this peer.
  3343  		s.removePeer(connectedPeer)
  3344  		s.ignorePeerTermination[connectedPeer] = struct{}{}
  3345  		s.scheduledPeerConnection[pubStr] = func() {
  3346  			s.peerConnected(conn, connReq, false)
  3347  		}
  3348  	}
  3349  }
  3350  
  3351  // UnassignedConnID is the default connection ID that a request can have before
  3352  // it actually is submitted to the connmgr.
  3353  // TODO(conner): move into connmgr package, or better, add connmgr method for
  3354  // generating atomic IDs
  3355  const UnassignedConnID uint64 = 0
  3356  
  3357  // cancelConnReqs stops all persistent connection requests for a given pubkey.
  3358  // Any attempts initiated by the peerTerminationWatcher are canceled first.
  3359  // Afterwards, each connection request is removed from the connmgr. The caller
  3360  // can optionally specify a connection ID to ignore, which prevents us from
  3361  // canceling a successful request. All persistent connreqs for the provided
  3362  // pubkey are discarded after the operation.
  3363  func (s *server) cancelConnReqs(pubStr string, skip *uint64) {
  3364  	// First, cancel any lingering persistent retry attempts, which will
  3365  	// prevent retries for any with backoffs that are still maturing.
  3366  	if cancelChan, ok := s.persistentRetryCancels[pubStr]; ok {
  3367  		close(cancelChan)
  3368  		delete(s.persistentRetryCancels, pubStr)
  3369  	}
  3370  
  3371  	// Next, check to see if we have any outstanding persistent connection
  3372  	// requests to this peer. If so, then we'll remove all of these
  3373  	// connection requests, and also delete the entry from the map.
  3374  	connReqs, ok := s.persistentConnReqs[pubStr]
  3375  	if !ok {
  3376  		return
  3377  	}
  3378  
  3379  	for _, connReq := range connReqs {
  3380  		srvrLog.Tracef("Canceling %s", connReq)
  3381  
  3382  		// Atomically capture the current request identifier.
  3383  		connID := connReq.ID()
  3384  
  3385  		// Skip any zero IDs, as this indicates the request has not
  3386  		// yet been scheduled.
  3387  		if connID == UnassignedConnID {
  3388  			continue
  3389  		}
  3390  
  3391  		// Skip a particular connection ID if instructed.
  3392  		if skip != nil && connID == *skip {
  3393  			continue
  3394  		}
  3395  
  3396  		s.connMgr.Remove(connID)
  3397  	}
  3398  
  3399  	delete(s.persistentConnReqs, pubStr)
  3400  }
  3401  
  3402  // handleCustomMessage dispatches an incoming custom peer message to
  3403  // subscribers.
  3404  func (s *server) handleCustomMessage(peer [33]byte, msg *lnwire.Custom) error {
  3405  	srvrLog.Debugf("Custom message received: peer=%x, type=%d",
  3406  		peer, msg.Type)
  3407  
  3408  	return s.customMessageServer.SendUpdate(&CustomMessage{
  3409  		Peer: peer,
  3410  		Msg:  msg,
  3411  	})
  3412  }
  3413  
  3414  // SubscribeCustomMessages subscribes to a stream of incoming custom peer
  3415  // messages.
  3416  func (s *server) SubscribeCustomMessages() (*subscribe.Client, error) {
  3417  	return s.customMessageServer.Subscribe()
  3418  }
  3419  
  3420  // peerConnected is a function that handles initializing a newly connected
  3421  // peer by adding it to the server's global list of all active peers, and
  3422  // starting all the goroutines the peer needs to function properly. The inbound
  3423  // boolean should be true if the peer initiated the connection to us.
  3424  func (s *server) peerConnected(conn net.Conn, connReq *connmgr.ConnReq,
  3425  	inbound bool) {
  3426  
  3427  	brontideConn := conn.(*brontide.Conn)
  3428  	addr := conn.RemoteAddr()
  3429  	pubKey := brontideConn.RemotePub()
  3430  
  3431  	srvrLog.Infof("Finalizing connection to %x@%s, inbound=%v",
  3432  		pubKey.SerializeCompressed(), addr, inbound)
  3433  
  3434  	peerAddr := &lnwire.NetAddress{
  3435  		IdentityKey: pubKey,
  3436  		Address:     addr,
  3437  		ChainNet:    s.cfg.ActiveNetParams.Net,
  3438  	}
  3439  
  3440  	// With the brontide connection established, we'll now craft the feature
  3441  	// vectors to advertise to the remote node.
  3442  	initFeatures := s.featureMgr.Get(feature.SetInit)
  3443  	legacyFeatures := s.featureMgr.Get(feature.SetLegacyGlobal)
  3444  
  3445  	// Look up past error caches for the peer in the server. If no buffer is
  3446  	// found, create a fresh buffer.
  3447  	pkStr := string(peerAddr.IdentityKey.SerializeCompressed())
  3448  	errBuffer, ok := s.peerErrors[pkStr]
  3449  	if !ok {
  3450  		var err error
  3451  		errBuffer, err = queue.NewCircularBuffer(peer.ErrorBufferSize)
  3452  		if err != nil {
  3453  			srvrLog.Errorf("unable to create peer error buffer: %v", err)
  3454  			return
  3455  		}
  3456  	}
  3457  
  3458  	// Now that we've established a connection, create a peer and add it to
  3459  	// the set of currently active peers. Configure the peer with the incoming
  3460  	// and outgoing broadcast deltas to prevent htlcs from being accepted or
  3461  	// offered that would trigger channel closure. In case of outgoing
  3462  	// htlcs, an extra block is added to prevent the channel from being
  3463  	// closed when the htlc is outstanding and a new block comes in.
  3464  	pCfg := peer.Config{
  3465  		Conn:                    brontideConn,
  3466  		ConnReq:                 connReq,
  3467  		Addr:                    peerAddr,
  3468  		Inbound:                 inbound,
  3469  		Features:                initFeatures,
  3470  		LegacyFeatures:          legacyFeatures,
  3471  		OutgoingCltvRejectDelta: lncfg.DefaultOutgoingCltvRejectDelta,
  3472  		ChanActiveTimeout:       s.cfg.ChanEnableTimeout,
  3473  		ErrorBuffer:             errBuffer,
  3474  		WritePool:               s.writePool,
  3475  		ReadPool:                s.readPool,
  3476  		Switch:                  s.htlcSwitch,
  3477  		InterceptSwitch:         s.interceptableSwitch,
  3478  		ChannelDB:               s.chanStateDB,
  3479  		ChannelGraph:            s.graphDB,
  3480  		ChainArb:                s.chainArb,
  3481  		AuthGossiper:            s.authGossiper,
  3482  		ChanStatusMgr:           s.chanStatusMgr,
  3483  		ChainIO:                 s.cc.ChainIO,
  3484  		FeeEstimator:            s.cc.FeeEstimator,
  3485  		Signer:                  s.cc.Wallet.Cfg.Signer,
  3486  		SigPool:                 s.sigPool,
  3487  		Wallet:                  s.cc.Wallet,
  3488  		ChainNotifier:           s.cc.ChainNotifier,
  3489  		RoutingPolicy:           s.cc.RoutingPolicy,
  3490  		Sphinx:                  s.sphinx,
  3491  		WitnessBeacon:           s.witnessBeacon,
  3492  		Invoices:                s.invoices,
  3493  		ChannelNotifier:         s.channelNotifier,
  3494  		HtlcNotifier:            s.htlcNotifier,
  3495  		TowerClient:             s.towerClient,
  3496  		AnchorTowerClient:       s.anchorTowerClient,
  3497  		DisconnectPeer:          s.DisconnectPeer,
  3498  		GenNodeAnnouncement:     s.genNodeAnnouncement,
  3499  
  3500  		PongBuf: s.pongBuf,
  3501  
  3502  		PrunePersistentPeerConnection: s.prunePersistentPeerConnection,
  3503  
  3504  		FetchLastChanUpdate: s.fetchLastChanUpdate(),
  3505  
  3506  		FundingManager: s.fundingMgr,
  3507  
  3508  		ChainParams: s.cfg.ActiveNetParams.Params,
  3509  
  3510  		Hodl:                    s.cfg.Hodl,
  3511  		UnsafeReplay:            s.cfg.UnsafeReplay,
  3512  		MaxOutgoingCltvExpiry:   s.cfg.MaxOutgoingCltvExpiry,
  3513  		MaxChannelFeeAllocation: s.cfg.MaxChannelFeeAllocation,
  3514  		CoopCloseTargetConfs:    s.cfg.CoopCloseTargetConfs,
  3515  		MaxAnchorsCommitFeeRate: chainfee.AtomPerKByte(
  3516  			s.cfg.MaxCommitFeeRateAnchors * 1000),
  3517  		ChannelCommitInterval:  s.cfg.ChannelCommitInterval,
  3518  		ChannelCommitBatchSize: s.cfg.ChannelCommitBatchSize,
  3519  		HandleCustomMessage:    s.handleCustomMessage,
  3520  		Quit:                   s.quit,
  3521  	}
  3522  
  3523  	copy(pCfg.PubKeyBytes[:], peerAddr.IdentityKey.SerializeCompressed())
  3524  	copy(pCfg.ServerPubKey[:], s.identityECDH.PubKey().SerializeCompressed())
  3525  
  3526  	p := peer.NewBrontide(pCfg)
  3527  
  3528  	// TODO(roasbeef): update IP address for link-node
  3529  	//  * also mark last-seen, do it one single transaction?
  3530  
  3531  	s.addPeer(p)
  3532  
  3533  	// Once we have successfully added the peer to the server, we can
  3534  	// delete the previous error buffer from the server's map of error
  3535  	// buffers.
  3536  	delete(s.peerErrors, pkStr)
  3537  
  3538  	// Dispatch a goroutine to asynchronously start the peer. This process
  3539  	// includes sending and receiving Init messages, which would be a DOS
  3540  	// vector if we held the server's mutex throughout the procedure.
  3541  	s.wg.Add(1)
  3542  	go s.peerInitializer(p)
  3543  }
  3544  
  3545  // addPeer adds the passed peer to the server's global state of all active
  3546  // peers.
  3547  func (s *server) addPeer(p *peer.Brontide) {
  3548  	if p == nil {
  3549  		return
  3550  	}
  3551  
  3552  	// Ignore new peers if we're shutting down.
  3553  	if s.Stopped() {
  3554  		p.Disconnect(ErrServerShuttingDown)
  3555  		return
  3556  	}
  3557  
  3558  	// Track the new peer in our indexes so we can quickly look it up either
  3559  	// according to its public key, or its peer ID.
  3560  	// TODO(roasbeef): pipe all requests through to the
  3561  	// queryHandler/peerManager
  3562  
  3563  	pubSer := p.IdentityKey().SerializeCompressed()
  3564  	pubStr := string(pubSer)
  3565  
  3566  	s.peersByPub[pubStr] = p
  3567  
  3568  	if p.Inbound() {
  3569  		s.inboundPeers[pubStr] = p
  3570  	} else {
  3571  		s.outboundPeers[pubStr] = p
  3572  	}
  3573  
  3574  	// Inform the peer notifier of a peer online event so that it can be
  3575  	// reported to clients listening for peer events.
  3576  	var pubKey [33]byte
  3577  	copy(pubKey[:], pubSer)
  3578  
  3579  	s.peerNotifier.NotifyPeerOnline(pubKey)
  3580  }
  3581  
  3582  // peerInitializer asynchronously starts a newly connected peer after it has
  3583  // been added to the server's peer map. This method sets up a
  3584  // peerTerminationWatcher for the given peer, and ensures that it executes even
  3585  // if the peer failed to start. In the event of a successful connection, this
  3586  // method reads the negotiated, local feature-bits and spawns the appropriate
  3587  // graph synchronization method. Any registered clients of NotifyWhenOnline will
  3588  // be signaled of the new peer once the method returns.
  3589  //
  3590  // NOTE: This MUST be launched as a goroutine.
  3591  func (s *server) peerInitializer(p *peer.Brontide) {
  3592  	defer s.wg.Done()
  3593  
  3594  	// Avoid initializing peers while the server is exiting.
  3595  	if s.Stopped() {
  3596  		return
  3597  	}
  3598  
  3599  	// Create a channel that will be used to signal a successful start of
  3600  	// the link. This prevents the peer termination watcher from beginning
  3601  	// its duty too early.
  3602  	ready := make(chan struct{})
  3603  
  3604  	// Before starting the peer, launch a goroutine to watch for the
  3605  	// unexpected termination of this peer, which will ensure all resources
  3606  	// are properly cleaned up, and re-establish persistent connections when
  3607  	// necessary. The peer termination watcher will be short circuited if
  3608  	// the peer is ever added to the ignorePeerTermination map, indicating
  3609  	// that the server has already handled the removal of this peer.
  3610  	s.wg.Add(1)
  3611  	go s.peerTerminationWatcher(p, ready)
  3612  
  3613  	// Start the peer! If an error occurs, we Disconnect the peer, which
  3614  	// will unblock the peerTerminationWatcher.
  3615  	if err := p.Start(); err != nil {
  3616  		p.Disconnect(fmt.Errorf("unable to start peer: %v", err))
  3617  		return
  3618  	}
  3619  
  3620  	// Otherwise, signal to the peerTerminationWatcher that the peer startup
  3621  	// was successful, and to begin watching the peer's wait group.
  3622  	close(ready)
  3623  
  3624  	pubStr := string(p.IdentityKey().SerializeCompressed())
  3625  
  3626  	s.mu.Lock()
  3627  	defer s.mu.Unlock()
  3628  
  3629  	// Check if there are listeners waiting for this peer to come online.
  3630  	srvrLog.Debugf("Notifying that peer %v is online", p)
  3631  	for _, peerChan := range s.peerConnectedListeners[pubStr] {
  3632  		select {
  3633  		case peerChan <- p:
  3634  		case <-s.quit:
  3635  			return
  3636  		}
  3637  	}
  3638  	delete(s.peerConnectedListeners, pubStr)
  3639  }
  3640  
  3641  // peerTerminationWatcher waits until a peer has been disconnected unexpectedly,
  3642  // and then cleans up all resources allocated to the peer, notifies relevant
  3643  // sub-systems of its demise, and finally handles re-connecting to the peer if
  3644  // it's persistent. If the server intentionally disconnects a peer, it should
  3645  // have a corresponding entry in the ignorePeerTermination map which will cause
  3646  // the cleanup routine to exit early. The passed `ready` chan is used to
  3647  // synchronize when WaitForDisconnect should begin watching on the peer's
  3648  // waitgroup. The ready chan should only be signaled if the peer starts
  3649  // successfully; otherwise the peer should be disconnected instead.
  3650  //
  3651  // NOTE: This MUST be launched as a goroutine.
  3652  func (s *server) peerTerminationWatcher(p *peer.Brontide, ready chan struct{}) {
  3653  	defer s.wg.Done()
  3654  
  3655  	p.WaitForDisconnect(ready)
  3656  
  3657  	srvrLog.Debugf("Peer %v has been disconnected", p)
  3658  
  3659  	// If the server is exiting then we can bail out early ourselves as all
  3660  	// the other sub-systems will already be shutting down.
  3661  	if s.Stopped() {
  3662  		return
  3663  	}
  3664  
  3665  	// Next, we'll cancel all pending funding reservations with this node.
  3666  	// If we tried to initiate any funding flows that haven't yet finished,
  3667  	// then we need to unlock those committed outputs so they're still
  3668  	// available for use.
  3669  	s.fundingMgr.CancelPeerReservations(p.PubKey())
  3670  
  3671  	pubKey := p.IdentityKey()
  3672  
  3673  	// We'll also inform the gossiper that this peer is no longer active,
  3674  	// so we don't need to maintain sync state for it any longer.
  3675  	s.authGossiper.PruneSyncState(p.PubKey())
  3676  
  3677  	// Tell the switch to remove all links associated with this peer.
  3678  	// Passing nil as the target link indicates that all links associated
  3679  	// with this interface should be closed.
  3680  	//
  3681  	// TODO(roasbeef): instead add a PurgeInterfaceLinks function?
  3682  	links, err := s.htlcSwitch.GetLinksByInterface(p.PubKey())
  3683  	if err != nil && err != htlcswitch.ErrNoLinksFound {
  3684  		srvrLog.Errorf("Unable to get channel links for %v: %v", p, err)
  3685  	}
  3686  
  3687  	for _, link := range links {
  3688  		s.htlcSwitch.RemoveLink(link.ChanID())
  3689  	}
  3690  
  3691  	s.mu.Lock()
  3692  	defer s.mu.Unlock()
  3693  
  3694  	// If there were any notification requests for when this peer
  3695  	// disconnected, we can trigger them now.
  3696  	srvrLog.Debugf("Notifying that peer %v is offline", p)
  3697  	pubStr := string(pubKey.SerializeCompressed())
  3698  	for _, offlineChan := range s.peerDisconnectedListeners[pubStr] {
  3699  		close(offlineChan)
  3700  	}
  3701  	delete(s.peerDisconnectedListeners, pubStr)
  3702  
  3703  	// If the server has already removed this peer, we can short circuit the
  3704  	// peer termination watcher and skip cleanup.
  3705  	if _, ok := s.ignorePeerTermination[p]; ok {
  3706  		delete(s.ignorePeerTermination, p)
  3707  
  3708  		pubKey := p.PubKey()
  3709  		pubStr := string(pubKey[:])
  3710  
  3711  		// If a connection callback is present, we'll go ahead and
  3712  		// execute it now that previous peer has fully disconnected. If
  3713  		// the callback is not present, this likely implies the peer was
  3714  		// purposefully disconnected via RPC, and that no reconnect
  3715  		// should be attempted.
  3716  		connCallback, ok := s.scheduledPeerConnection[pubStr]
  3717  		if ok {
  3718  			delete(s.scheduledPeerConnection, pubStr)
  3719  			connCallback()
  3720  		}
  3721  		return
  3722  	}
  3723  
  3724  	// First, cleanup any remaining state the server has regarding the peer
  3725  	// in question.
  3726  	s.removePeer(p)
  3727  
  3728  	// Next, check to see if this is a persistent peer or not.
  3729  	if _, ok := s.persistentPeers[pubStr]; !ok {
  3730  		return
  3731  	}
  3732  
  3733  	// Gather the addresses we'll use when attempting to reconnect to the peer.
  3734  	var addrs []net.Addr
  3735  
  3736  	// We'll ensure that we locate all the peer's advertised addresses for
  3737  	// reconnection purposes.
  3738  	advertisedAddrs, err := s.fetchNodeAdvertisedAddrs(pubKey)
  3739  	switch {
  3740  	// We found advertised addresses, so use them.
  3741  	case err == nil:
  3742  		addrs = advertisedAddrs
  3743  
  3744  	// The peer doesn't have an advertised address.
  3745  	case err == errNoAdvertisedAddr:
  3746  		// Do not attempt to re-connect if the only
  3747  		// address we have for this peer was due to an
  3748  		// inbound connection, since this is unlikely
  3749  		// to succeed.
  3750  		srvrLog.Debugf("Ignoring reconnection attempt "+
  3751  			"to inbound peer %v without "+
  3752  			"advertised address", p)
  3753  		return
  3754  
  3755  	// We came across an error retrieving an advertised
  3756  	// address; log it and abort the reconnection
  3757  	// attempt.
  3758  	default:
  3759  		srvrLog.Errorf("Unable to retrieve advertised "+
  3760  			"address for node %x: %v", p.PubKey(),
  3761  			err)
  3762  		return
  3763  	}
  3764  
  3765  	// Make an easy lookup map so that we can check if an address
  3766  	// is already in the address list that we have stored for this peer.
  3767  	existingAddrs := make(map[string]bool)
  3768  	for _, addr := range s.persistentPeerAddrs[pubStr] {
  3769  		existingAddrs[addr.String()] = true
  3770  	}
  3771  
  3772  	// Add any missing addresses for this peer to persistentPeerAddrs.
  3773  	for _, addr := range addrs {
  3774  		if existingAddrs[addr.String()] {
  3775  			continue
  3776  		}
  3777  
  3778  		s.persistentPeerAddrs[pubStr] = append(
  3779  			s.persistentPeerAddrs[pubStr],
  3780  			&lnwire.NetAddress{
  3781  				IdentityKey: p.IdentityKey(),
  3782  				Address:     addr,
  3783  				ChainNet:    p.NetAddress().ChainNet,
  3784  			},
  3785  		)
  3786  	}
  3787  
  3788  	// Record the computed backoff in the backoff map.
  3789  	backoff := s.nextPeerBackoff(pubStr, p.StartTime())
  3790  	s.persistentPeersBackoff[pubStr] = backoff
  3791  
  3792  	// Initialize a retry canceller for this peer if one does not
  3793  	// exist.
  3794  	cancelChan, ok := s.persistentRetryCancels[pubStr]
  3795  	if !ok {
  3796  		cancelChan = make(chan struct{})
  3797  		s.persistentRetryCancels[pubStr] = cancelChan
  3798  	}
  3799  
  3800  	// We choose not to add this goroutine to the wait group since the
  3801  	// Connect call can stall for arbitrarily long if we shut down while
  3802  	// an outbound connection attempt is being made.
  3803  	go func() {
  3804  		srvrLog.Debugf("Scheduling connection re-establishment to "+
  3805  			"persistent peer %x in %s",
  3806  			p.IdentityKey().SerializeCompressed(), backoff)
  3807  
  3808  		select {
  3809  		case <-time.After(backoff):
  3810  		case <-cancelChan:
  3811  			return
  3812  		case <-s.quit:
  3813  			return
  3814  		}
  3815  
  3816  		srvrLog.Debugf("Attempting to re-establish persistent "+
  3817  			"connection to peer %x",
  3818  			p.IdentityKey().SerializeCompressed())
  3819  
  3820  		s.connectToPersistentPeer(pubStr)
  3821  	}()
  3822  }
  3823  
  3824  // connectToPersistentPeer uses all the stored addresses for a peer to attempt
  3825  // to connect to the peer. It creates connection requests if there are
  3826  // currently none for a given address and it removes old connection requests
  3827  // if the associated address is no longer in the latest address list for the
  3828  // peer.
  3829  func (s *server) connectToPersistentPeer(pubKeyStr string) {
  3830  	s.mu.Lock()
  3831  	defer s.mu.Unlock()
  3832  
  3833  	// Create an easy lookup map of the addresses we have stored for the
  3834  	// peer. We will remove entries from this map if we have existing
  3835  	// connection requests for the associated address and then any leftover
  3836  	// entries will indicate which addresses we should create new
  3837  	// connection requests for.
  3838  	addrMap := make(map[string]*lnwire.NetAddress)
  3839  	for _, addr := range s.persistentPeerAddrs[pubKeyStr] {
  3840  		addrMap[addr.String()] = addr
  3841  	}
  3842  
  3843  	// Go through each of the existing connection requests and
  3844  	// check if they correspond to the latest set of addresses. If
  3845  	// there is a connection request that does not use one of the latest
  3846  	// advertised addresses then remove that connection request.
  3847  	var updatedConnReqs []*connmgr.ConnReq
  3848  	for _, connReq := range s.persistentConnReqs[pubKeyStr] {
  3849  		lnAddr := connReq.Addr.(*lnwire.NetAddress).Address.String()
  3850  
  3851  		switch _, ok := addrMap[lnAddr]; ok {
  3852  		// If the existing connection request is using one of the
  3853  		// latest advertised addresses for the peer then we add it to
  3854  		// updatedConnReqs and remove the associated address from
  3855  		// addrMap so that we don't recreate this connReq later on.
  3856  		case true:
  3857  			updatedConnReqs = append(
  3858  				updatedConnReqs, connReq,
  3859  			)
  3860  			delete(addrMap, lnAddr)
  3861  
  3862  		// If the existing connection request is using an address that
  3863  		// is not one of the latest advertised addresses for the peer
  3864  		// then we remove the connection request from the connection
  3865  		// manager.
  3866  		case false:
  3867  			srvrLog.Info(
  3868  				"Removing conn req:", connReq.Addr.String(),
  3869  			)
  3870  			s.connMgr.Remove(connReq.ID())
  3871  		}
  3872  	}
  3873  
  3874  	s.persistentConnReqs[pubKeyStr] = updatedConnReqs
  3875  
  3876  	cancelChan, ok := s.persistentRetryCancels[pubKeyStr]
  3877  	if !ok {
  3878  		cancelChan = make(chan struct{})
  3879  		s.persistentRetryCancels[pubKeyStr] = cancelChan
  3880  	}
  3881  
  3882  	// Any addresses left in addrMap are new ones that we have not made
  3883  	// connection requests for. So create new connection requests for those.
  3884  	// If there is more than one address in the address map, stagger the
  3885  	// creation of the connection requests for those.
  3886  	go func() {
  3887  		ticker := time.NewTicker(multiAddrConnectionStagger)
  3888  		defer ticker.Stop()
  3889  
  3890  		for _, addr := range addrMap {
  3891  			// Send the persistent connection request to the
  3892  			// connection manager, saving the request itself so we
  3893  			// can cancel/restart the process as needed.
  3894  			connReq := &connmgr.ConnReq{
  3895  				Addr:      addr,
  3896  				Permanent: true,
  3897  			}
  3898  
  3899  			s.mu.Lock()
  3900  			s.persistentConnReqs[pubKeyStr] = append(
  3901  				s.persistentConnReqs[pubKeyStr], connReq,
  3902  			)
  3903  			s.mu.Unlock()
  3904  
  3905  			srvrLog.Debugf("Attempting persistent connection to "+
  3906  				"channel peer %v", addr)
  3907  
  3908  			go s.connMgr.Connect(connReq)
  3909  
  3910  			select {
  3911  			case <-s.quit:
  3912  				return
  3913  			case <-cancelChan:
  3914  				return
  3915  			case <-ticker.C:
  3916  			}
  3917  		}
  3918  	}()
  3919  }
  3920  
  3921  // removePeer removes the passed peer from the server's state of all active
  3922  // peers.
  3923  func (s *server) removePeer(p *peer.Brontide) {
  3924  	if p == nil {
  3925  		return
  3926  	}
  3927  
  3928  	srvrLog.Debugf("removing peer %v", p)
  3929  
  3930  	// As the peer is now finished, ensure that the TCP connection is
  3931  	// closed and all of its related goroutines have exited.
  3932  	p.Disconnect(fmt.Errorf("server: disconnecting peer %v", p))
  3933  
  3934  	// If this peer had an active persistent connection request, remove it.
  3935  	if p.ConnReq() != nil {
  3936  		s.connMgr.Remove(p.ConnReq().ID())
  3937  	}
  3938  
  3939  	// Ignore deleting peers if we're shutting down.
  3940  	if s.Stopped() {
  3941  		return
  3942  	}
  3943  
  3944  	pKey := p.PubKey()
  3945  	pubSer := pKey[:]
  3946  	pubStr := string(pubSer)
  3947  
  3948  	delete(s.peersByPub, pubStr)
  3949  
  3950  	if p.Inbound() {
  3951  		delete(s.inboundPeers, pubStr)
  3952  	} else {
  3953  		delete(s.outboundPeers, pubStr)
  3954  	}
  3955  
  3956  	// Copy the peer's error buffer across to the server if it has any items
  3957  	// in it so that we can restore peer errors across connections.
  3958  	if p.ErrorBuffer().Total() > 0 {
  3959  		s.peerErrors[pubStr] = p.ErrorBuffer()
  3960  	}
  3961  
  3962  	// Inform the peer notifier of a peer offline event so that it can be
  3963  	// reported to clients listening for peer events.
  3964  	var pubKey [33]byte
  3965  	copy(pubKey[:], pubSer)
  3966  
  3967  	s.peerNotifier.NotifyPeerOffline(pubKey)
  3968  }
  3969  
  3970  // ConnectToPeer requests that the server connect to a Lightning Network peer
  3971  // at the specified address. This function will *block* until either a
  3972  // connection is established, or the initial handshake process fails.
  3973  //
  3974  // NOTE: This function is safe for concurrent access.
  3975  func (s *server) ConnectToPeer(addr *lnwire.NetAddress,
  3976  	perm bool, timeout time.Duration) error {
  3977  
  3978  	targetPub := string(addr.IdentityKey.SerializeCompressed())
  3979  
  3980  	// Acquire mutex, but use explicit unlocking instead of defer for
  3981  	// better granularity.  In certain conditions, this method requires
  3982  	// making an outbound connection to a remote peer, which requires the
  3983  	// lock to be released, and subsequently reacquired.
  3984  	s.mu.Lock()
  3985  
  3986  	// Ensure we're not already connected to this peer.
  3987  	peer, err := s.findPeerByPubStr(targetPub)
  3988  	if err == nil {
  3989  		s.mu.Unlock()
  3990  		return &errPeerAlreadyConnected{peer: peer}
  3991  	}
  3992  
  3993  	// Peer was not found, continue to pursue connection with peer.
  3994  
  3995  	// If there's already a pending connection request for this pubkey,
  3996  	// then we'll log a warning, but still proceed with this connection
  3997  	// attempt.
  3998  	if reqs, ok := s.persistentConnReqs[targetPub]; ok {
  3999  		srvrLog.Warnf("Already have %d persistent connection "+
  4000  			"requests for %v, connecting anyway.", len(reqs), addr)
  4001  	}
  4002  
  4003  	// If there's not already a pending or active connection to this node,
  4004  	// then instruct the connection manager to attempt to establish a
  4005  	// persistent connection to the peer.
  4006  	srvrLog.Debugf("Connecting to %v", addr)
  4007  	if perm {
  4008  		connReq := &connmgr.ConnReq{
  4009  			Addr:      addr,
  4010  			Permanent: true,
  4011  		}
  4012  
  4013  		// Since the user requested a permanent connection, we'll set
  4014  		// the entry to true which will tell the server to continue
  4015  		// reconnecting even if the number of channels with this peer is
  4016  		// zero.
  4017  		s.persistentPeers[targetPub] = true
  4018  		if _, ok := s.persistentPeersBackoff[targetPub]; !ok {
  4019  			s.persistentPeersBackoff[targetPub] = s.cfg.MinBackoff
  4020  		}
  4021  		s.persistentConnReqs[targetPub] = append(
  4022  			s.persistentConnReqs[targetPub], connReq,
  4023  		)
  4024  		s.mu.Unlock()
  4025  
  4026  		go s.connMgr.Connect(connReq)
  4027  
  4028  		return nil
  4029  	}
  4030  	s.mu.Unlock()
  4031  
  4032  	// If we're not making a persistent connection, then we'll attempt to
  4033  	// connect to the target peer. If we can't make the connection, or
  4034  	// the crypto negotiation breaks down, then return an error to the
  4035  	// caller.
  4036  	errChan := make(chan error, 1)
  4037  	s.connectToPeer(addr, errChan, timeout)
  4038  
  4039  	select {
  4040  	case err := <-errChan:
  4041  		return err
  4042  	case <-s.quit:
  4043  		return ErrServerShuttingDown
  4044  	}
  4045  }
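
// A minimal usage sketch (hypothetical caller): dial a peer at a known TCP
// address with a 30s handshake timeout, without requesting a permanent
// connection. identityKey and host are placeholders assumed to be supplied by
// the caller.
//
//	tcpAddr, err := net.ResolveTCPAddr("tcp", host)
//	if err != nil {
//		return err
//	}
//	addr := &lnwire.NetAddress{
//		IdentityKey: identityKey,
//		Address:     tcpAddr,
//	}
//	if err := s.ConnectToPeer(addr, false, 30*time.Second); err != nil {
//		return err
//	}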
  4046  
  4047  // connectToPeer establishes a connection to a remote peer. errChan is used to
  4048  // notify the caller if the connection attempt has failed. Otherwise, it will be
  4049  // closed.
  4050  func (s *server) connectToPeer(addr *lnwire.NetAddress,
  4051  	errChan chan<- error, timeout time.Duration) {
  4052  
  4053  	conn, err := brontide.Dial(
  4054  		s.identityECDH, addr, timeout, s.cfg.net.Dial,
  4055  	)
  4056  	if err != nil {
  4057  		srvrLog.Errorf("Unable to connect to %v: %v", addr, err)
  4058  		select {
  4059  		case errChan <- err:
  4060  		case <-s.quit:
  4061  		}
  4062  		return
  4063  	}
  4064  
  4065  	close(errChan)
  4066  
  4067  	srvrLog.Tracef("Brontide dialer made local=%v, remote=%v",
  4068  		conn.LocalAddr(), conn.RemoteAddr())
  4069  
  4070  	s.OutboundPeerConnected(nil, conn)
  4071  }
  4072  
  4073  // DisconnectPeer sends the request to server to close the connection with peer
  4074  // identified by public key.
  4075  //
  4076  // NOTE: This function is safe for concurrent access.
  4077  func (s *server) DisconnectPeer(pubKey *secp256k1.PublicKey) error {
  4078  	pubBytes := pubKey.SerializeCompressed()
  4079  	pubStr := string(pubBytes)
  4080  
  4081  	s.mu.Lock()
  4082  	defer s.mu.Unlock()
  4083  
  4084  	// Check that we're actually connected to this peer. If not, then we'll
  4085  	// return an error as we can't disconnect from a peer that we're not
  4086  	// currently connected to.
  4087  	peer, err := s.findPeerByPubStr(pubStr)
  4088  	if err == ErrPeerNotConnected {
  4089  		return fmt.Errorf("peer %x is not connected", pubBytes)
  4090  	}
  4091  
  4092  	srvrLog.Infof("Disconnecting from %v", peer)
  4093  
  4094  	s.cancelConnReqs(pubStr, nil)
  4095  
  4096  	// If this peer was formerly a persistent connection, then we'll remove
  4097  	// them from this map so we don't attempt to re-connect after we
  4098  	// disconnect.
  4099  	delete(s.persistentPeers, pubStr)
  4100  	delete(s.persistentPeersBackoff, pubStr)
  4101  
  4102  	// Remove the peer by calling Disconnect. Previously this was done with
  4103  	// removePeer, which bypassed the peerTerminationWatcher.
  4104  	peer.Disconnect(fmt.Errorf("server: DisconnectPeer called"))
  4105  
  4106  	return nil
  4107  }
  4108  
  4109  // OpenChannel sends a request to the server to open a channel to the specified
  4110  // peer identified by nodeKey with the passed channel funding parameters.
  4111  //
  4112  // NOTE: This function is safe for concurrent access.
  4113  func (s *server) OpenChannel(
  4114  	req *funding.InitFundingMsg) (chan *lnrpc.OpenStatusUpdate, chan error) {
  4115  
  4116  	// The updateChan will have a buffer of 2, since we expect a ChanPending
  4117  	// + a ChanOpen update, and we want to make sure the funding process is
  4118  	// not blocked if the caller is not reading the updates.
  4119  	req.Updates = make(chan *lnrpc.OpenStatusUpdate, 2)
  4120  	req.Err = make(chan error, 1)
  4121  
  4122  	// First attempt to locate the target peer to open a channel with, if
  4123  	// we're unable to locate the peer then this request will fail.
  4124  	pubKeyBytes := req.TargetPubkey.SerializeCompressed()
  4125  	s.mu.RLock()
  4126  	peer, ok := s.peersByPub[string(pubKeyBytes)]
  4127  	if !ok {
  4128  		s.mu.RUnlock()
  4129  
  4130  		req.Err <- fmt.Errorf("peer %x is not online", pubKeyBytes)
  4131  		return req.Updates, req.Err
  4132  	}
  4133  	req.Peer = peer
  4134  	s.mu.RUnlock()
  4135  
  4136  	// We'll wait until the peer is active before beginning the channel
  4137  	// opening process.
  4138  	select {
  4139  	case <-peer.ActiveSignal():
  4140  	case <-peer.QuitSignal():
  4141  		req.Err <- fmt.Errorf("peer %x disconnected", pubKeyBytes)
  4142  		return req.Updates, req.Err
  4143  	case <-s.quit:
  4144  		req.Err <- ErrServerShuttingDown
  4145  		return req.Updates, req.Err
  4146  	}
  4147  
  4148  	// If the fee rate wasn't specified, then we'll use a default
  4149  	// confirmation target.
  4150  	if req.FundingFeePerKB == 0 {
  4151  		estimator := s.cc.FeeEstimator
  4152  		feeRate, err := estimator.EstimateFeePerKB(6)
  4153  		if err != nil {
  4154  			req.Err <- err
  4155  			return req.Updates, req.Err
  4156  		}
  4157  		req.FundingFeePerKB = feeRate
  4158  	}
  4159  
  4160  	// Spawn a goroutine to send the funding workflow request to the funding
  4161  	// manager. This allows the server to continue handling queries instead
  4162  	// of blocking on this request which is exported as a synchronous
  4163  	// request to the outside world.
  4164  	go s.fundingMgr.InitFundingWorkflow(req)
  4165  
  4166  	return req.Updates, req.Err
  4167  }
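
// A minimal usage sketch (hypothetical caller): initiate a funding flow and
// wait for the first asynchronous result. fundingReq is a placeholder assumed
// to be a populated *funding.InitFundingMsg.
//
//	updates, errChan := s.OpenChannel(fundingReq)
//	select {
//	case upd := <-updates:
//		srvrLog.Infof("Channel funding update: %v", upd)
//	case err := <-errChan:
//		return err
//	case <-s.quit:
//		return ErrServerShuttingDown
//	}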
  4168  
  4169  // Peers returns a slice of all active peers.
  4170  //
  4171  // NOTE: This function is safe for concurrent access.
  4172  func (s *server) Peers() []*peer.Brontide {
  4173  	s.mu.RLock()
  4174  	defer s.mu.RUnlock()
  4175  
  4176  	peers := make([]*peer.Brontide, 0, len(s.peersByPub))
  4177  	for _, peer := range s.peersByPub {
  4178  		peers = append(peers, peer)
  4179  	}
  4180  
  4181  	return peers
  4182  }
  4183  
  4184  // parseHexColor takes a hex string representation of a color in the
  4185  // form "#RRGGBB", parses the hex color values, and returns a color.RGBA
  4186  // struct of the same color.
  4187  func parseHexColor(colorStr string) (color.RGBA, error) {
  4188  	// Check if the hex color string is a valid color representation.
  4189  	if !validColorRegexp.MatchString(colorStr) {
  4190  		return color.RGBA{}, errors.New("Color must be specified " +
  4191  			"using a hexadecimal value in the form #RRGGBB")
  4192  	}
  4193  
  4194  	// Decode the hex color string to bytes.
  4195  	// The resulting byte array is in the form [R, G, B].
  4196  	colorBytes, err := hex.DecodeString(colorStr[1:])
  4197  	if err != nil {
  4198  		return color.RGBA{}, err
  4199  	}
  4200  
  4201  	return color.RGBA{R: colorBytes[0], G: colorBytes[1], B: colorBytes[2]}, nil
  4202  }
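
// A minimal usage sketch: parse a color string into its RGB components (the
// "#3399ff" value here is just an example).
//
//	rgba, err := parseHexColor("#3399ff")
//	if err != nil {
//		return err
//	}
//	srvrLog.Debugf("Node color: R=%d G=%d B=%d", rgba.R, rgba.G, rgba.B)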
  4203  
  4204  // computeNextBackoff uses a truncated exponential backoff to compute the next
  4205  // backoff using the value of the existing backoff. The returned duration is
  4206  // randomized in either direction by 1/20 to prevent tight loops from
  4207  // stabilizing.
  4208  func computeNextBackoff(currBackoff, maxBackoff time.Duration) time.Duration {
  4209  	// Double the current backoff, truncating if it exceeds our maximum.
  4210  	nextBackoff := 2 * currBackoff
  4211  	if nextBackoff > maxBackoff {
  4212  		nextBackoff = maxBackoff
  4213  	}
  4214  
  4215  	// Using 1/10 of our duration as a margin, compute a random offset to
  4216  	// avoid the nodes entering connection cycles.
  4217  	margin := nextBackoff / 10
  4218  
  4219  	var wiggle big.Int
  4220  	wiggle.SetUint64(uint64(margin))
  4221  	if _, err := rand.Int(rand.Reader, &wiggle); err != nil {
  4222  		// Randomizing is not mission critical, so we'll just return the
  4223  		// current backoff.
  4224  		return nextBackoff
  4225  	}
  4226  
  4227  	// Otherwise add in our wiggle, but subtract out half of the margin so
  4228  	// that the backoff can be tweaked by 1/20 in either direction.
  4229  	return nextBackoff + (time.Duration(wiggle.Uint64()) - margin/2)
  4230  }
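
// As a concrete illustration of computeNextBackoff (values chosen purely for
// the example): a current backoff of 2m doubles to 4m, the margin is 24s, and
// the returned duration lands somewhere in [3m48s, 4m12s). A current backoff
// of 40m doubles past the 1h maximum, so it is truncated to 1h before the
// wiggle is applied.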
  4231  
  4232  // errNoAdvertisedAddr is an error returned when we attempt to retrieve the
  4233  // advertised address of a node, but they don't have one.
  4234  var errNoAdvertisedAddr = errors.New("no advertised address found")
  4235  
  4236  // fetchNodeAdvertisedAddrs attempts to fetch the advertised addresses of a node.
  4237  func (s *server) fetchNodeAdvertisedAddrs(pub *secp256k1.PublicKey) ([]net.Addr, error) {
  4238  	vertex, err := route.NewVertexFromBytes(pub.SerializeCompressed())
  4239  	if err != nil {
  4240  		return nil, err
  4241  	}
  4242  
  4243  	node, err := s.graphDB.FetchLightningNode(vertex)
  4244  	if err != nil {
  4245  		return nil, err
  4246  	}
  4247  
  4248  	if len(node.Addresses) == 0 {
  4249  		return nil, errNoAdvertisedAddr
  4250  	}
  4251  
  4252  	return node.Addresses, nil
  4253  }
  4254  
  4255  // fetchLastChanUpdate returns a function which is able to retrieve our latest
  4256  // channel update for a target channel.
  4257  func (s *server) fetchLastChanUpdate() func(lnwire.ShortChannelID) (
  4258  	*lnwire.ChannelUpdate, error) {
  4259  
  4260  	ourPubKey := s.identityECDH.PubKey().SerializeCompressed()
  4261  	return func(cid lnwire.ShortChannelID) (*lnwire.ChannelUpdate, error) {
  4262  		info, edge1, edge2, err := s.chanRouter.GetChannelByID(cid)
  4263  		if err != nil {
  4264  			return nil, err
  4265  		}
  4266  
  4267  		return netann.ExtractChannelUpdate(
  4268  			ourPubKey[:], info, edge1, edge2,
  4269  		)
  4270  	}
  4271  }
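
// A minimal usage sketch (hypothetical caller): resolve our most recent
// ChannelUpdate for a given short channel ID. cid is a placeholder assumed to
// be supplied by the caller.
//
//	fetchUpdate := s.fetchLastChanUpdate()
//	update, err := fetchUpdate(cid)
//	if err != nil {
//		return err
//	}
//	srvrLog.Debugf("Latest update for %v has timestamp %v", cid,
//		update.Timestamp)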
  4272  
  4273  // applyChannelUpdate applies the channel update to the different sub-systems of
  4274  // the server.
  4275  func (s *server) applyChannelUpdate(update *lnwire.ChannelUpdate) error {
  4276  	errChan := s.authGossiper.ProcessLocalAnnouncement(update)
  4277  	select {
  4278  	case err := <-errChan:
  4279  		return err
  4280  	case <-s.quit:
  4281  		return ErrServerShuttingDown
  4282  	}
  4283  }
  4284  
  4285  // SendCustomMessage sends a custom message to the peer with the specified
  4286  // pubkey.
  4287  func (s *server) SendCustomMessage(peerPub [33]byte, msgType lnwire.MessageType,
  4288  	data []byte) error {
  4289  
  4290  	peer, err := s.FindPeerByPubStr(string(peerPub[:]))
  4291  	if err != nil {
  4292  		return err
  4293  	}
  4294  
  4295  	// We'll wait until the peer is active.
  4296  	select {
  4297  	case <-peer.ActiveSignal():
  4298  	case <-peer.QuitSignal():
  4299  		return fmt.Errorf("peer %x disconnected", peerPub)
  4300  	case <-s.quit:
  4301  		return ErrServerShuttingDown
  4302  	}
  4303  
  4304  	msg, err := lnwire.NewCustom(msgType, data)
  4305  	if err != nil {
  4306  		return err
  4307  	}
  4308  
  4309  	// Send the message as low-priority. For now we assume that all
  4310  	// application-defined messages are low priority.
  4311  	return peer.SendMessageLazy(true, msg)
  4312  }
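
// A minimal usage sketch (hypothetical caller): send an application-defined
// payload to a connected peer. peerPub is the peer's 33-byte compressed
// public key and customType is a placeholder assumed to fall within the
// custom message type range used by the application.
//
//	payload := []byte("hello")
//	err := s.SendCustomMessage(peerPub, customType, payload)
//	if err != nil {
//		srvrLog.Errorf("unable to send custom message: %v", err)
//	}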
  4313  
  4314  // newSweepPkScriptGen creates a closure that generates a new public key script
  4315  // which should be used to sweep any funds into the on-chain wallet.
  4316  // Specifically, the script generated is a pay-to-pubkey-hash (p2pkh)
  4317  // output.
  4318  func newSweepPkScriptGen(
  4319  	wallet lnwallet.WalletController) func() ([]byte, error) {
  4320  
  4321  	return func() ([]byte, error) {
  4322  		sweepAddr, err := wallet.NewAddress(
  4323  			lnwallet.PubKeyHash, false, lnwallet.DefaultAccountName,
  4324  		)
  4325  		if err != nil {
  4326  			return nil, err
  4327  		}
  4328  
  4329  		return input.PayToAddrScript(sweepAddr)
  4330  	}
  4331  }
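
// A minimal usage sketch (hypothetical caller): build the generator once and
// invoke it whenever a fresh sweep script is needed, e.g. while wiring up a
// sweeper. wallet is a placeholder assumed to be an initialized
// lnwallet.WalletController.
//
//	genSweepScript := newSweepPkScriptGen(wallet)
//	pkScript, err := genSweepScript()
//	if err != nil {
//		return err
//	}
//	srvrLog.Debugf("Sweeping funds to script %x", pkScript)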
  4332  
  4333  // shouldPeerBootstrap returns true if we should attempt to perform peer
  4334  // bootstrapping to actively seek out peers using the set of active network
  4335  // bootstrappers.
  4336  func shouldPeerBootstrap(cfg *Config) bool {
  4337  	isSimnet := cfg.Decred.SimNet
  4338  	isRegtest := cfg.Decred.RegTest
  4339  	isDevNetwork := isSimnet || isRegtest
  4340  
  4341  	// TODO(yy): remove the check on simnet/regtest such that the itest is
  4342  	// covering the bootstrapping process.
  4343  	return !cfg.NoNetBootstrap && !isDevNetwork
  4344  }