github.com/decred/dcrlnd@v0.7.6/funding/manager.go (about)

     1  package funding
     2  
     3  import (
     4  	"bytes"
     5  	"encoding/binary"
     6  	"fmt"
     7  	"io"
     8  	"net"
     9  	"sync"
    10  	"time"
    11  
    12  	"github.com/davecgh/go-spew/spew"
    13  	"github.com/decred/dcrd/chaincfg/chainhash"
    14  	"github.com/decred/dcrd/dcrec/secp256k1/v4"
    15  	"github.com/decred/dcrd/dcrec/secp256k1/v4/ecdsa"
    16  	"github.com/decred/dcrd/dcrutil/v4"
    17  	"github.com/decred/dcrd/wire"
    18  	"github.com/decred/dcrlnd/chainntnfs"
    19  	"github.com/decred/dcrlnd/chainreg"
    20  	"github.com/decred/dcrlnd/chanacceptor"
    21  	"github.com/decred/dcrlnd/channeldb"
    22  	"github.com/decred/dcrlnd/discovery"
    23  	"github.com/decred/dcrlnd/htlcswitch"
    24  	"github.com/decred/dcrlnd/input"
    25  	"github.com/decred/dcrlnd/keychain"
    26  	"github.com/decred/dcrlnd/labels"
    27  	"github.com/decred/dcrlnd/lnpeer"
    28  	"github.com/decred/dcrlnd/lnrpc"
    29  	"github.com/decred/dcrlnd/lnwallet"
    30  	"github.com/decred/dcrlnd/lnwallet/chainfee"
    31  	"github.com/decred/dcrlnd/lnwallet/chanfunding"
    32  	"github.com/decred/dcrlnd/lnwire"
    33  	"github.com/decred/dcrlnd/routing"
    34  	"github.com/go-errors/errors"
    35  	"golang.org/x/crypto/salsa20"
    36  )
    37  
var (
	// byteOrder defines the endian-ness we use for encoding to and from
	// buffers. Big endian is used, e.g., for the outpoint index written
	// by writeOutpoint below.
	byteOrder = binary.BigEndian
)
    43  
    44  // writeOutpoint writes an outpoint to an io.Writer. This is not the same as
    45  // the channeldb variant as this uses WriteVarBytes for the Hash.
    46  //
    47  // Note(decred): This does not include the Tree field of the outpoint.
    48  func writeOutpoint(w io.Writer, o *wire.OutPoint) error {
    49  	scratch := make([]byte, 4)
    50  
    51  	if err := wire.WriteVarBytes(w, 0, o.Hash[:]); err != nil {
    52  		return err
    53  	}
    54  
    55  	byteOrder.PutUint32(scratch, o.Index)
    56  	_, err := w.Write(scratch)
    57  
    58  	// Note(decred): this does not include the Tree field of the outpoint.
    59  
    60  	return err
    61  }
    62  
const (
	// MinDcrRemoteDelay and MaxDcrRemoteDelay are the extremes of the
	// Decred CSV delay we will require the remote to use for its
	// commitment transaction. The actual delay we will require will be
	// somewhere between these values, depending on channel size.
	MinDcrRemoteDelay uint16 = 288
	MaxDcrRemoteDelay uint16 = 4032

	// MinChanFundingSize is the smallest channel that we'll allow to be
	// created over the RPC interface.
	MinChanFundingSize = dcrutil.Amount(20000)

	// MaxDecredFundingAmount is a soft-limit of the maximum channel size
	// currently accepted on the Decred chain within the Lightning
	// Protocol. This limit is defined in BOLT-0002, and serves as an
	// initial precautionary limit while implementations are battle tested
	// in the real world.
	MaxDecredFundingAmount = dcrutil.Amount(1<<30) - 1

	// MaxDecredFundingAmountWumbo is a soft-limit on the maximum size of
	// wumbo channels. This limit is 500 DCR and is the only thing standing
	// between you and limitless channel size (apart from 21 million cap).
	MaxDecredFundingAmountWumbo = dcrutil.Amount(500 * 1e8)

	// MaxFundingAmount is a soft-limit of the maximum channel size
	// currently accepted within the Lightning Protocol. This limit is
	// defined in BOLT-0002, and serves as an initial precautionary limit
	// while implementations are battle tested in the real world.
	//
	// TODO(roasbeef): add command line param to modify
	MaxFundingAmount = MaxDecredFundingAmount

	// msgBufferSize is the size of the buffered channels used to relay
	// funding messages and requests to the manager's main goroutine.
	//
	// TODO(roasbeef): tune
	msgBufferSize = 50

	// maxWaitNumBlocksFundingConf is the maximum number of blocks to wait
	// for the funding transaction to be confirmed before forgetting
	// channels that aren't initiated by us. 4032 blocks is ~2 weeks.
	maxWaitNumBlocksFundingConf = 4032
)
   102  
   103  var (
   104  	// ErrFundingManagerShuttingDown is an error returned when attempting to
   105  	// process a funding request/message but the funding manager has already
   106  	// been signaled to shut down.
   107  	ErrFundingManagerShuttingDown = errors.New("funding manager shutting " +
   108  		"down")
   109  
   110  	// ErrConfirmationTimeout is an error returned when we as a responder
   111  	// are waiting for a funding transaction to confirm, but too many
   112  	// blocks pass without confirmation.
   113  	ErrConfirmationTimeout = errors.New("timeout waiting for funding " +
   114  		"confirmation")
   115  
   116  	// errUpfrontShutdownScriptNotSupported is returned if an upfront shutdown
   117  	// script is set for a peer that does not support the feature bit.
   118  	errUpfrontShutdownScriptNotSupported = errors.New("peer does not support" +
   119  		"option upfront shutdown script")
   120  
   121  	zeroID [32]byte
   122  )
   123  
// reservationWithCtx encapsulates a pending channel reservation. This wrapper
// struct is used internally within the funding manager to track and progress
// the funding workflow initiated by incoming/outgoing methods from the target
// peer. Additionally, this struct houses a response and error channel which is
// used to respond to the caller in the case a channel workflow is initiated
// via a local signal such as RPC.
//
// TODO(roasbeef): actually use the context package
//   - deadlines, etc.
type reservationWithCtx struct {
	// reservation is the wallet-level reservation backing this funding
	// workflow.
	reservation *lnwallet.ChannelReservation

	// peer is the remote peer this channel is being opened with.
	peer lnpeer.Peer

	// chanAmt is the total capacity of the proposed channel.
	chanAmt dcrutil.Amount

	// Constraints we require for the remote.
	remoteCsvDelay uint16
	remoteMinHtlc  lnwire.MilliAtom
	remoteMaxValue lnwire.MilliAtom
	remoteMaxHtlcs uint16

	// maxLocalCsv is the maximum csv we will accept from the remote.
	maxLocalCsv uint16

	// channelType is the explicit channel type proposed by the initiator of
	// the channel.
	channelType *lnwire.ChannelType

	// updateMtx guards lastUpdated; see isLocked and updateTimestamp.
	updateMtx   sync.RWMutex
	lastUpdated time.Time

	// updates is the channel on which OpenStatusUpdate messages are sent
	// back to the local caller that initiated the workflow.
	updates chan *lnrpc.OpenStatusUpdate

	// err is the channel on which errors encountered during the funding
	// flow are reported back to the local caller.
	err chan error
}
   158  
   159  // isLocked checks the reservation's timestamp to determine whether it is
   160  // locked. It returns the last update time.
   161  func (r *reservationWithCtx) isLocked() (bool, time.Time) {
   162  	r.updateMtx.RLock()
   163  	defer r.updateMtx.RUnlock()
   164  
   165  	// The time zero value represents a locked reservation.
   166  	return r.lastUpdated.IsZero(), r.lastUpdated
   167  }
   168  
   169  // updateTimestamp updates the reservation's timestamp with the current time.
   170  func (r *reservationWithCtx) updateTimestamp() {
   171  	r.updateMtx.Lock()
   172  	defer r.updateMtx.Unlock()
   173  
   174  	r.lastUpdated = time.Now()
   175  }
   176  
// InitFundingMsg is sent by an outside subsystem to the funding manager in
// order to kick off a funding workflow with a specified target peer. The
// original request which defines the parameters of the funding workflow are
// embedded within this message giving the funding manager full context w.r.t
// the workflow.
type InitFundingMsg struct {
	// Peer is the peer that we want to open a channel to.
	Peer lnpeer.Peer

	// TargetPubkey is the public key of the peer.
	TargetPubkey *secp256k1.PublicKey

	// ChainHash is the target genesis hash for this channel.
	ChainHash chainhash.Hash

	// SubtractFees set to true means that fees will be subtracted
	// from the LocalFundingAmt.
	SubtractFees bool

	// LocalFundingAmt is the size of the channel.
	LocalFundingAmt dcrutil.Amount

	// PushAmt is the amount pushed to the counterparty.
	PushAmt lnwire.MilliAtom

	// FundingFeePerKB is the fee for the funding transaction.
	FundingFeePerKB chainfee.AtomPerKByte

	// Private determines whether or not this channel will be private.
	Private bool

	// MinHtlcIn is the minimum incoming HTLC that we accept.
	MinHtlcIn lnwire.MilliAtom

	// RemoteCsvDelay is the CSV delay we require for the remote peer.
	RemoteCsvDelay uint16

	// MinConfs indicates the minimum number of confirmations that each
	// output selected to fund the channel should satisfy.
	MinConfs int32

	// ShutdownScript is an optional upfront shutdown script for the
	// channel. This value is optional, so may be nil.
	ShutdownScript lnwire.DeliveryAddress

	// MaxValueInFlight is the maximum amount of coins in MilliAtom
	// that can be pending within the channel. It only applies to the
	// remote party.
	MaxValueInFlight lnwire.MilliAtom

	// MaxHtlcs is the maximum number of HTLCs that the remote peer
	// can offer us.
	MaxHtlcs uint16

	// MaxLocalCsv is the maximum local csv delay we will accept from our
	// peer.
	MaxLocalCsv uint16

	// ChanFunder is an optional channel funder that allows the caller to
	// control exactly how the channel funding is carried out. If not
	// specified, then the default chanfunding.WalletAssembler will be
	// used.
	ChanFunder chanfunding.Assembler

	// PendingChanID, if not all zeroes (the default value), will be the
	// pending channel ID used for the funding flow within the wire
	// protocol.
	PendingChanID [32]byte

	// ChannelType allows the caller to use an explicit channel type for the
	// funding negotiation. This type will only be observed if BOTH sides
	// support explicit channel type negotiation.
	ChannelType *lnwire.ChannelType

	// Updates is a channel which updates to the opening status of the channel
	// are sent on.
	Updates chan *lnrpc.OpenStatusUpdate

	// Err is a channel which errors encountered during the funding flow are
	// sent on.
	Err chan error
}
   259  
// fundingMsg is sent by the ProcessFundingMsg function and packages a
// funding-specific lnwire.Message along with the lnpeer.Peer that sent it.
type fundingMsg struct {
	// msg is the funding-related wire message that was received.
	msg  lnwire.Message
	// peer is the remote peer that sent the message.
	peer lnpeer.Peer
}
   266  
// pendingChannels is a map instantiated per-peer which tracks all active
// pending single funded channels indexed by their pending channel identifier,
// which is a set of 32-bytes generated via a CSPRNG (see nextPendingChanID).
type pendingChannels map[[32]byte]*reservationWithCtx
   271  
// serializedPubKey is used within the FundingManager's activeReservations list
// to identify the nodes with which the FundingManager is actively working to
// initiate new channels. It holds the 33-byte compressed encoding of a
// secp256k1 public key (see newSerializedKey).
type serializedPubKey [33]byte
   276  
   277  // newSerializedKey creates a new serialized public key from an instance of a
   278  // live pubkey object.
   279  func newSerializedKey(pubKey *secp256k1.PublicKey) serializedPubKey {
   280  	var s serializedPubKey
   281  	copy(s[:], pubKey.SerializeCompressed())
   282  	return s
   283  }
   284  
// Config defines the configuration for the FundingManager. All elements
// within the configuration MUST be non-nil for the FundingManager to carry out
// its duties.
type Config struct {
	// NoWumboChans indicates if we're to reject all incoming wumbo channel
	// requests, and also reject all outgoing wumbo channel requests.
	NoWumboChans bool

	// IDKey is the PublicKey that is used to identify this node within the
	// Lightning Network.
	IDKey *secp256k1.PublicKey

	// IDKeyLoc is the locator for the key that is used to identify this
	// node within the Lightning Network.
	IDKeyLoc keychain.KeyLocator

	// Wallet handles the parts of the funding process that involves moving
	// funds from on-chain transaction outputs into Lightning channels.
	Wallet *lnwallet.LightningWallet

	// PublishTransaction facilitates the process of broadcasting a
	// transaction to the network. The second argument is a label applied
	// to the transaction.
	PublishTransaction func(*wire.MsgTx, string) error

	// UpdateLabel updates the label that a transaction has in our wallet,
	// overwriting any existing labels.
	UpdateLabel func(chainhash.Hash, string) error

	// FeeEstimator calculates appropriate fee rates based on historical
	// transaction information.
	FeeEstimator chainfee.Estimator

	// Notifier is used by the FundingManager to determine when the
	// channel's funding transaction has been confirmed on the blockchain
	// so that the channel creation process can be completed.
	Notifier chainntnfs.ChainNotifier

	// SignMessage signs an arbitrary message with a given public key. The
	// actual digest signed is the double sha-256 of the message. In the
	// case that the private key corresponding to the passed public key
	// cannot be located, then an error is returned.
	//
	// TODO(roasbeef): should instead pass on this responsibility to a
	// distinct sub-system?
	SignMessage func(keyLoc keychain.KeyLocator,
		msg []byte, doubleHash bool) (*ecdsa.Signature, error)

	// CurrentNodeAnnouncement should return the latest, fully signed node
	// announcement from the backing Lightning Network node.
	CurrentNodeAnnouncement func() (lnwire.NodeAnnouncement, error)

	// SendAnnouncement is used by the FundingManager to send announcement
	// messages to the Gossiper to possibly broadcast to the greater
	// network. A set of optional message fields can be provided to populate
	// any information within the graph that is not included in the gossip
	// message.
	SendAnnouncement func(msg lnwire.Message,
		optionalFields ...discovery.OptionalMsgField) chan error

	// NotifyWhenOnline allows the FundingManager to register with a
	// subsystem that will notify it when the peer comes online. This is
	// used when sending the fundingLocked message, since it MUST be
	// delivered after the funding transaction is confirmed.
	//
	// NOTE: The peerChan channel must be buffered.
	NotifyWhenOnline func(peer [33]byte, peerChan chan<- lnpeer.Peer)

	// FindChannel queries the database for the channel with the given
	// channel ID.
	FindChannel func(chanID lnwire.ChannelID) (*channeldb.OpenChannel, error)

	// TempChanIDSeed is a cryptographically random string of bytes that's
	// used as a seed to generate pending channel ID's.
	TempChanIDSeed [32]byte

	// DefaultRoutingPolicy is the default routing policy used when
	// initially announcing channels.
	DefaultRoutingPolicy htlcswitch.ForwardingPolicy

	// DefaultMinHtlcIn is the default minimum incoming htlc value that is
	// set as a channel parameter.
	DefaultMinHtlcIn lnwire.MilliAtom

	// NumRequiredConfs is a function closure that helps the funding
	// manager decide how many confirmations it should require for a
	// channel extended to it. The function is able to take into account
	// the amount of the channel, and any funds we'll be pushed in the
	// process to determine how many confirmations we'll require.
	NumRequiredConfs func(dcrutil.Amount, lnwire.MilliAtom) uint16

	// RequiredRemoteDelay is a function that maps the total amount in a
	// proposed channel to the CSV delay that we'll require for the remote
	// party. Naturally a larger channel should require a higher CSV delay
	// in order to give us more time to claim funds in the case of a
	// contract breach.
	RequiredRemoteDelay func(dcrutil.Amount) uint16

	// RequiredRemoteChanReserve is a function closure that, given the
	// channel capacity and dust limit, will return an appropriate amount
	// for the remote peer's required channel reserve that is to be adhered
	// to at all times.
	RequiredRemoteChanReserve func(capacity, dustLimit dcrutil.Amount) dcrutil.Amount

	// RequiredRemoteMaxValue is a function closure that, given the channel
	// capacity, returns the amount of MilliAtoms that our remote peer
	// can have in total outstanding HTLCs with us.
	RequiredRemoteMaxValue func(dcrutil.Amount) lnwire.MilliAtom

	// RequiredRemoteMaxHTLCs is a function closure that, given the channel
	// capacity, returns the number of maximum HTLCs the remote peer can
	// offer us.
	RequiredRemoteMaxHTLCs func(dcrutil.Amount) uint16

	// WatchNewChannel is to be called once a new channel enters the final
	// funding stage: waiting for on-chain confirmation. This method sends
	// the channel to the ChainArbitrator so it can watch for any on-chain
	// events related to the channel. We also provide the public key of the
	// node we're establishing a channel with for reconnection purposes.
	WatchNewChannel func(*channeldb.OpenChannel, *secp256k1.PublicKey) error

	// ReportShortChanID allows the funding manager to report the newly
	// discovered short channel ID of a formerly pending channel to outside
	// sub-systems.
	ReportShortChanID func(wire.OutPoint) error

	// ZombieSweeperInterval is the periodic time interval in which the
	// zombie sweeper is run.
	ZombieSweeperInterval time.Duration

	// ReservationTimeout is the length of idle time that must pass before
	// a reservation is considered a zombie.
	ReservationTimeout time.Duration

	// MinChanSize is the smallest channel size that we'll accept as an
	// inbound channel. We have such a parameter, as otherwise, nodes could
	// flood us with very small channels that would never really be usable
	// due to fees.
	MinChanSize dcrutil.Amount

	// MaxChanSize is the largest channel size that we'll accept as an
	// inbound channel. We have such a parameter, so that you may decide how
	// WUMBO you would like your channel.
	MaxChanSize dcrutil.Amount

	// MaxPendingChannels is the maximum number of pending channels we
	// allow for each peer.
	MaxPendingChannels int

	// RejectPush is set true if the fundingmanager should reject any
	// incoming channels having a non-zero push amount.
	RejectPush bool

	// MaxLocalCSVDelay is the maximum csv delay we will allow for our
	// commit output. Channels that exceed this value will be failed.
	MaxLocalCSVDelay uint16

	// NotifyOpenChannelEvent informs the ChannelNotifier when channels
	// transition from pending open to open.
	NotifyOpenChannelEvent func(wire.OutPoint)

	// OpenChannelPredicate is a predicate on the lnwire.OpenChannel message
	// and on the requesting node's public key that returns a bool which tells
	// the funding manager whether or not to accept the channel.
	OpenChannelPredicate chanacceptor.ChannelAcceptor

	// NotifyPendingOpenChannelEvent informs the ChannelNotifier when channels
	// enter a pending state.
	NotifyPendingOpenChannelEvent func(wire.OutPoint, *channeldb.OpenChannel)

	// EnableUpfrontShutdown specifies whether the upfront shutdown script
	// is enabled.
	EnableUpfrontShutdown bool

	// RegisteredChains keeps track of all chains that have been registered
	// with the daemon.
	RegisteredChains *chainreg.ChainRegistry

	// MaxAnchorsCommitFeeRate is the max commitment fee rate we'll use as
	// the initiator for channels of the anchor type.
	MaxAnchorsCommitFeeRate chainfee.AtomPerKByte
}
   466  
// Manager acts as an orchestrator/bridge between the wallet's
// 'ChannelReservation' workflow, and the wire protocol's funding initiation
// messages. Any requests to initiate the funding workflow for a channel,
// either kicked-off locally or remotely are handled by the funding manager.
// Once a channel's funding workflow has been completed, any local callers, the
// local peer, and possibly the remote peer are notified of the completion of
// the channel workflow. Additionally, any temporary or permanent access
// controls between the wallet and remote peers are enforced via the funding
// manager.
type Manager struct {
	// started/stopped guarantee Start and Stop each run their work at
	// most once.
	started sync.Once
	stopped sync.Once

	// cfg is a copy of the configuration struct that the FundingManager
	// was initialized with.
	cfg *Config

	// chanIDKey is a cryptographically random key that's used to generate
	// temporary channel ID's.
	chanIDKey [32]byte

	// chanIDNonce is a nonce that's incremented for each new funding
	// reservation created. It is guarded by nonceMtx.
	nonceMtx    sync.RWMutex
	chanIDNonce uint64

	// activeReservations is a map which houses the state of all pending
	// funding workflows.
	activeReservations map[serializedPubKey]pendingChannels

	// signedReservations is a utility map that maps the permanent channel
	// ID of a funding reservation to its temporary channel ID. This is
	// required as mid funding flow, we switch to referencing the channel
	// by its full channel ID once the commitment transactions have been
	// signed by both parties.
	signedReservations map[lnwire.ChannelID][32]byte

	// resMtx guards both of the maps above to ensure that all access is
	// goroutine safe.
	resMtx sync.RWMutex

	// fundingMsgs is a channel that relays fundingMsg structs from
	// external sub-systems using the ProcessFundingMsg call.
	fundingMsgs chan *fundingMsg

	// fundingRequests is a channel used to receive channel initiation
	// requests from a local subsystem within the daemon.
	fundingRequests chan *InitFundingMsg

	// newChanBarriers is a map from a channel ID to a 'barrier' which will
	// be signalled once the channel is fully open. This barrier acts as a
	// synchronization point for any incoming/outgoing HTLCs before the
	// channel has been fully opened. The map is guarded by barrierMtx.
	barrierMtx      sync.RWMutex
	newChanBarriers map[lnwire.ChannelID]chan struct{}

	// localDiscoverySignals maps a channel ID to a signal channel created
	// when the channel is loaded or created while still pending (see
	// start). Guarded by localDiscoveryMtx.
	localDiscoveryMtx     sync.Mutex
	localDiscoverySignals map[lnwire.ChannelID]chan struct{}

	// handleFundingLockedBarriers is a set of channel IDs, presumably
	// used to prevent concurrent handling of fundingLocked for the same
	// channel — TODO(review): confirm against handleFundingLocked.
	// Guarded by handleFundingLockedMtx.
	handleFundingLockedMtx      sync.RWMutex
	handleFundingLockedBarriers map[lnwire.ChannelID]struct{}

	// quit is closed on shutdown to signal all helper goroutines to exit;
	// wg tracks those goroutines so Stop can wait for them.
	quit chan struct{}
	wg   sync.WaitGroup
}
   532  
   533  // channelOpeningState represents the different states a channel can be in
   534  // between the funding transaction has been confirmed and the channel is
   535  // announced to the network and ready to be used.
   536  type channelOpeningState uint8
   537  
   538  const (
   539  	// markedOpen is the opening state of a channel if the funding
   540  	// transaction is confirmed on-chain, but fundingLocked is not yet
   541  	// successfully sent to the other peer.
   542  	markedOpen channelOpeningState = iota
   543  
   544  	// fundingLockedSent is the opening state of a channel if the
   545  	// fundingLocked message has successfully been sent to the other peer,
   546  	// but we still haven't announced the channel to the network.
   547  	fundingLockedSent
   548  
   549  	// addedToRouterGraph is the opening state of a channel if the
   550  	// channel has been successfully added to the router graph
   551  	// immediately after the fundingLocked message has been sent, but
   552  	// we still haven't announced the channel to the network.
   553  	addedToRouterGraph
   554  )
   555  
   556  func (c channelOpeningState) String() string {
   557  	switch c {
   558  	case markedOpen:
   559  		return "markedOpen"
   560  	case fundingLockedSent:
   561  		return "fundingLocked"
   562  	case addedToRouterGraph:
   563  		return "addedToRouterGraph"
   564  	default:
   565  		return "unknown"
   566  	}
   567  }
   568  
   569  // NewFundingManager creates and initializes a new instance of the
   570  // fundingManager.
   571  func NewFundingManager(cfg Config) (*Manager, error) {
   572  	return &Manager{
   573  		cfg:                         &cfg,
   574  		chanIDKey:                   cfg.TempChanIDSeed,
   575  		activeReservations:          make(map[serializedPubKey]pendingChannels),
   576  		signedReservations:          make(map[lnwire.ChannelID][32]byte),
   577  		newChanBarriers:             make(map[lnwire.ChannelID]chan struct{}),
   578  		fundingMsgs:                 make(chan *fundingMsg, msgBufferSize),
   579  		fundingRequests:             make(chan *InitFundingMsg, msgBufferSize),
   580  		localDiscoverySignals:       make(map[lnwire.ChannelID]chan struct{}),
   581  		handleFundingLockedBarriers: make(map[lnwire.ChannelID]struct{}),
   582  		quit:                        make(chan struct{}),
   583  	}, nil
   584  }
   585  
   586  // Start launches all helper goroutines required for handling requests sent
   587  // to the funding manager.
   588  func (f *Manager) Start() error {
   589  	var err error
   590  	f.started.Do(func() {
   591  		err = f.start()
   592  	})
   593  	return err
   594  }
   595  
// start performs the one-time startup work: it loads all channels from the
// database, re-establishes barriers and discovery signals for pending
// channels, rebroadcasts the funding transaction for pending channels we
// initiated, restarts the funding state machine for every channel, and
// launches the reservation coordinator goroutine.
func (f *Manager) start() error {
	log.Tracef("Funding manager running")

	// Upon restart, the Funding Manager will check the database to load any
	// channels that were waiting for their funding transactions to be
	// confirmed on the blockchain at the time when the daemon last went
	// down.
	// TODO(roasbeef): store height that funding finished?
	//  * would then replace call below
	allChannels, err := f.cfg.Wallet.Cfg.Database.FetchAllChannels()
	if err != nil {
		return err
	}

	for _, channel := range allChannels {
		chanID := lnwire.NewChanIDFromOutPoint(&channel.FundingOutpoint)

		// For any channels that were in a pending state when the
		// daemon was last connected, the Funding Manager will
		// re-initialize the channel barriers, and republish the
		// funding transaction if we're the initiator.
		if channel.IsPending {
			f.barrierMtx.Lock()
			log.Tracef("Loading pending ChannelPoint(%v), "+
				"creating chan barrier",
				channel.FundingOutpoint)

			f.newChanBarriers[chanID] = make(chan struct{})
			f.barrierMtx.Unlock()

			f.localDiscoverySignals[chanID] = make(chan struct{})

			// Rebroadcast the funding transaction for any pending
			// channel that we initiated. No error will be returned
			// if the transaction already has been broadcast.
			chanType := channel.ChanType
			if chanType.IsSingleFunder() && chanType.HasFundingTx() &&
				channel.IsInitiator {

				var fundingTxBuf bytes.Buffer
				err := channel.FundingTxn.Serialize(&fundingTxBuf)
				if err != nil {
					log.Errorf("Unable to serialize "+
						"funding transaction %v: %v",
						channel.FundingTxn.TxHash(), err)

					// Clear the buffer of any bytes that
					// were written before the serialization
					// error to prevent logging an
					// incomplete transaction.
					fundingTxBuf.Reset()
				}

				log.Debugf("Rebroadcasting funding tx for "+
					"ChannelPoint(%v): %x",
					channel.FundingOutpoint,
					fundingTxBuf.Bytes())

				// Set a nil short channel ID at this stage
				// because we do not know it until our funding
				// tx confirms.
				label := labels.MakeLabel(
					labels.LabelTypeChannelOpen, nil,
				)

				err = f.cfg.PublishTransaction(
					channel.FundingTxn, label,
				)
				if err != nil {
					// Best effort: log and continue so the
					// remaining channels still get their
					// state machines restarted.
					log.Errorf("Unable to rebroadcast "+
						"funding tx %x for "+
						"ChannelPoint(%v): %v",
						fundingTxBuf.Bytes(),
						channel.FundingOutpoint, err)
				}
			}
		}

		// We will restart the funding state machine for all channels,
		// which will wait for the channel's funding transaction to be
		// confirmed on the blockchain, and transmit the messages
		// necessary for the channel to be operational.
		f.wg.Add(1)
		go f.advanceFundingState(channel, chanID, nil)
	}

	f.wg.Add(1) // TODO(roasbeef): tune
	go f.reservationCoordinator()

	return nil
}
   687  
   688  // Stop signals all helper goroutines to execute a graceful shutdown. This
   689  // method will block until all goroutines have exited.
   690  func (f *Manager) Stop() error {
   691  	f.stopped.Do(func() {
   692  		log.Info("Funding manager shutting down")
   693  		close(f.quit)
   694  		f.wg.Wait()
   695  	})
   696  
   697  	return nil
   698  }
   699  
   700  // nextPendingChanID returns the next free pending channel ID to be used to
   701  // identify a particular future channel funding workflow.
   702  func (f *Manager) nextPendingChanID() [32]byte {
   703  	// Obtain a fresh nonce. We do this by encoding the current nonce
   704  	// counter, then incrementing it by one.
   705  	f.nonceMtx.Lock()
   706  	var nonce [8]byte
   707  	binary.LittleEndian.PutUint64(nonce[:], f.chanIDNonce)
   708  	f.chanIDNonce++
   709  	f.nonceMtx.Unlock()
   710  
   711  	// We'll generate the next pending channelID by "encrypting" 32-bytes
   712  	// of zeroes which'll extract 32 random bytes from our stream cipher.
   713  	var (
   714  		nextChanID [32]byte
   715  		zeroes     [32]byte
   716  	)
   717  	salsa20.XORKeyStream(nextChanID[:], zeroes[:], nonce[:], &f.chanIDKey)
   718  
   719  	return nextChanID
   720  }
   721  
   722  // CancelPeerReservations cancels all active reservations associated with the
   723  // passed node. This will ensure any outputs which have been pre committed,
   724  // (and thus locked from coin selection), are properly freed.
   725  func (f *Manager) CancelPeerReservations(nodePub [33]byte) {
   726  
   727  	log.Debugf("Cancelling all reservations for peer %x", nodePub[:])
   728  
   729  	f.resMtx.Lock()
   730  	defer f.resMtx.Unlock()
   731  
   732  	// We'll attempt to look up this node in the set of active
   733  	// reservations.  If they don't have any, then there's no further work
   734  	// to be done.
   735  	nodeReservations, ok := f.activeReservations[nodePub]
   736  	if !ok {
   737  		log.Debugf("No active reservations for node: %x", nodePub[:])
   738  		return
   739  	}
   740  
   741  	// If they do have any active reservations, then we'll cancel all of
   742  	// them (which releases any locked UTXO's), and also delete it from the
   743  	// reservation map.
   744  	for pendingID, resCtx := range nodeReservations {
   745  		if err := resCtx.reservation.Cancel(); err != nil {
   746  			log.Errorf("unable to cancel reservation for "+
   747  				"node=%x: %v", nodePub[:], err)
   748  		}
   749  
   750  		resCtx.err <- fmt.Errorf("peer disconnected")
   751  		delete(nodeReservations, pendingID)
   752  	}
   753  
   754  	// Finally, we'll delete the node itself from the set of reservations.
   755  	delete(f.activeReservations, nodePub)
   756  }
   757  
   758  // failFundingFlow will fail the active funding flow with the target peer,
   759  // identified by its unique temporary channel ID. This method will send an
   760  // error to the remote peer, and also remove the reservation from our set of
   761  // pending reservations.
   762  //
   763  // TODO(roasbeef): if peer disconnects, and haven't yet broadcast funding
   764  // transaction, then all reservations should be cleared.
   765  func (f *Manager) failFundingFlow(peer lnpeer.Peer, tempChanID [32]byte,
   766  	fundingErr error) {
   767  
   768  	log.Debugf("Failing funding flow for pending_id=%x: %v",
   769  		tempChanID, fundingErr)
   770  
   771  	ctx, err := f.cancelReservationCtx(peer.IdentityKey(), tempChanID, false)
   772  	if err != nil {
   773  		log.Errorf("unable to cancel reservation: %v", err)
   774  	}
   775  
   776  	// In case the case where the reservation existed, send the funding
   777  	// error on the error channel.
   778  	if ctx != nil {
   779  		ctx.err <- fundingErr
   780  	}
   781  
   782  	// We only send the exact error if it is part of out whitelisted set of
   783  	// errors (lnwire.FundingError or lnwallet.ReservationError).
   784  	var msg lnwire.ErrorData
   785  	switch e := fundingErr.(type) {
   786  
   787  	// Let the actual error message be sent to the remote for the
   788  	// whitelisted types.
   789  	case lnwallet.ReservationError:
   790  		msg = lnwire.ErrorData(e.Error())
   791  	case lnwire.FundingError:
   792  		msg = lnwire.ErrorData(e.Error())
   793  	case chanacceptor.ChanAcceptError:
   794  		msg = lnwire.ErrorData(e.Error())
   795  
   796  	// For all other error types we just send a generic error.
   797  	default:
   798  		msg = lnwire.ErrorData("funding failed due to internal error")
   799  	}
   800  
   801  	errMsg := &lnwire.Error{
   802  		ChanID: tempChanID,
   803  		Data:   msg,
   804  	}
   805  
   806  	log.Debugf("Sending funding error to peer (%x): %v",
   807  		peer.IdentityKey().SerializeCompressed(), spew.Sdump(errMsg))
   808  	if err := peer.SendMessage(false, errMsg); err != nil {
   809  		log.Errorf("unable to send error message to peer %v", err)
   810  	}
   811  }
   812  
   813  // reservationCoordinator is the primary goroutine tasked with progressing the
   814  // funding workflow between the wallet, and any outside peers or local callers.
   815  //
   816  // NOTE: This MUST be run as a goroutine.
   817  func (f *Manager) reservationCoordinator() {
   818  	defer f.wg.Done()
   819  
   820  	zombieSweepTicker := time.NewTicker(f.cfg.ZombieSweeperInterval)
   821  	defer zombieSweepTicker.Stop()
   822  
   823  	for {
   824  		select {
   825  
   826  		case fmsg := <-f.fundingMsgs:
   827  			switch msg := fmsg.msg.(type) {
   828  			case *lnwire.OpenChannel:
   829  				f.handleFundingOpen(fmsg.peer, msg)
   830  			case *lnwire.AcceptChannel:
   831  				f.handleFundingAccept(fmsg.peer, msg)
   832  			case *lnwire.FundingCreated:
   833  				f.handleFundingCreated(fmsg.peer, msg)
   834  			case *lnwire.FundingSigned:
   835  				f.handleFundingSigned(fmsg.peer, msg)
   836  			case *lnwire.FundingLocked:
   837  				f.wg.Add(1)
   838  				go f.handleFundingLocked(fmsg.peer, msg)
   839  			case *lnwire.Error:
   840  				f.handleErrorMsg(fmsg.peer, msg)
   841  			}
   842  		case req := <-f.fundingRequests:
   843  			f.handleInitFundingMsg(req)
   844  
   845  		case <-zombieSweepTicker.C:
   846  			f.pruneZombieReservations()
   847  
   848  		case <-f.quit:
   849  			return
   850  		}
   851  	}
   852  }
   853  
   854  // advanceFundingState will advance the channel through the steps after the
   855  // funding transaction is broadcasted, up until the point where the channel is
   856  // ready for operation. This includes waiting for the funding transaction to
   857  // confirm, sending funding locked to the peer, adding the channel to the
   858  // router graph, and announcing the channel. The updateChan can be set non-nil
   859  // to get OpenStatusUpdates.
   860  //
   861  // NOTE: This MUST be run as a goroutine.
   862  func (f *Manager) advanceFundingState(channel *channeldb.OpenChannel,
   863  	pendingChanID [32]byte, updateChan chan<- *lnrpc.OpenStatusUpdate) {
   864  
   865  	defer f.wg.Done()
   866  
   867  	// If the channel is still pending we must wait for the funding
   868  	// transaction to confirm.
   869  	if channel.IsPending {
   870  		err := f.advancePendingChannelState(channel, pendingChanID)
   871  		if err != nil {
   872  			log.Errorf("Unable to advance pending state of "+
   873  				"ChannelPoint(%v): %v",
   874  				channel.FundingOutpoint, err)
   875  			return
   876  		}
   877  	}
   878  
   879  	// We create the state-machine object which wraps the database state.
   880  	lnChannel, err := lnwallet.NewLightningChannel(
   881  		nil, channel, nil, &f.cfg.Wallet.Cfg.NetParams,
   882  	)
   883  	if err != nil {
   884  		log.Errorf("Unable to create LightningChannel(%v): %v",
   885  			channel.FundingOutpoint, err)
   886  		return
   887  	}
   888  
   889  	for {
   890  		channelState, shortChanID, err := f.getChannelOpeningState(
   891  			&channel.FundingOutpoint,
   892  		)
   893  		if err == channeldb.ErrChannelNotFound {
   894  			// Channel not in fundingManager's opening database,
   895  			// meaning it was successfully announced to the
   896  			// network.
   897  			// TODO(halseth): could do graph consistency check
   898  			// here, and re-add the edge if missing.
   899  			log.Debugf("ChannelPoint(%v) with chan_id=%x not "+
   900  				"found in opening database, assuming already "+
   901  				"announced to the network",
   902  				channel.FundingOutpoint, pendingChanID)
   903  			return
   904  		} else if err != nil {
   905  			log.Errorf("Unable to query database for "+
   906  				"channel opening state(%v): %v",
   907  				channel.FundingOutpoint, err)
   908  			return
   909  		}
   910  
   911  		// If we did find the channel in the opening state database, we
   912  		// have seen the funding transaction being confirmed, but there
   913  		// are still steps left of the setup procedure. We continue the
   914  		// procedure where we left off.
   915  		err = f.stateStep(
   916  			channel, lnChannel, shortChanID, pendingChanID,
   917  			channelState, updateChan,
   918  		)
   919  		if err != nil {
   920  			log.Errorf("Unable to advance state(%v): %v",
   921  				channel.FundingOutpoint, err)
   922  			return
   923  		}
   924  	}
   925  }
   926  
// stateStep advances the confirmed channel one step in the funding state
// machine. This method is synchronous and the new channel opening state will
// have been written to the database when it successfully returns. The
// updateChan can be set non-nil to get OpenStatusUpdates.
//
// The states progress markedOpen -> fundingLockedSent -> addedToRouterGraph;
// the final step deletes the channel's opening-state record from the
// database entirely. Returns ErrFundingManagerShuttingDown if the manager
// quits while delivering the open update to the caller, and an error for any
// unrecognized channelState.
func (f *Manager) stateStep(channel *channeldb.OpenChannel,
	lnChannel *lnwallet.LightningChannel,
	shortChanID *lnwire.ShortChannelID, pendingChanID [32]byte,
	channelState channelOpeningState,
	updateChan chan<- *lnrpc.OpenStatusUpdate) error {

	chanID := lnwire.NewChanIDFromOutPoint(&channel.FundingOutpoint)
	log.Debugf("Channel(%v) with ShortChanID %v has opening state %v",
		chanID, shortChanID, channelState)

	switch channelState {

	// The funding transaction was confirmed, but we did not successfully
	// send the fundingLocked message to the peer, so let's do that now.
	case markedOpen:
		err := f.sendFundingLocked(channel, lnChannel, shortChanID)
		if err != nil {
			return fmt.Errorf("failed sending fundingLocked: %v",
				err)
		}

		// As the fundingLocked message is now sent to the peer, the
		// channel is moved to the next state of the state machine. It
		// will be moved to the last state (actually deleted from the
		// database) after the channel is finally announced.
		err = f.saveChannelOpeningState(
			&channel.FundingOutpoint, fundingLockedSent,
			shortChanID,
		)
		if err != nil {
			return fmt.Errorf("error setting channel state to"+
				" fundingLockedSent: %v", err)
		}

		log.Debugf("Channel(%v) with ShortChanID %v: successfully "+
			"sent FundingLocked", chanID, shortChanID)

		return nil

	// fundingLocked was sent to peer, but the channel was not added to the
	// router graph and the channel announcement was not sent.
	case fundingLockedSent:
		err := f.addToRouterGraph(channel, shortChanID)
		if err != nil {
			return fmt.Errorf("failed adding to "+
				"router graph: %v", err)
		}

		// As the channel is now added to the ChannelRouter's topology,
		// the channel is moved to the next state of the state machine.
		// It will be moved to the last state (actually deleted from
		// the database) after the channel is finally announced.
		err = f.saveChannelOpeningState(
			&channel.FundingOutpoint, addedToRouterGraph,
			shortChanID,
		)
		if err != nil {
			return fmt.Errorf("error setting channel state to"+
				" addedToRouterGraph: %v", err)
		}

		log.Debugf("Channel(%v) with ShortChanID %v: successfully "+
			"added to router graph", chanID, shortChanID)

		// Give the caller a final update notifying them that
		// the channel is now open.
		// TODO(roasbeef): only notify after recv of funding locked?
		fundingPoint := channel.FundingOutpoint
		cp := &lnrpc.ChannelPoint{
			FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
				FundingTxidBytes: fundingPoint.Hash[:],
			},
			OutputIndex: fundingPoint.Index,
		}

		if updateChan != nil {
			upd := &lnrpc.OpenStatusUpdate{
				Update: &lnrpc.OpenStatusUpdate_ChanOpen{
					ChanOpen: &lnrpc.ChannelOpenUpdate{
						ChannelPoint: cp,
					},
				},
				PendingChanId: pendingChanID[:],
			}

			// Block until the caller consumes the update, but
			// bail out if the funding manager shuts down first.
			select {
			case updateChan <- upd:
			case <-f.quit:
				return ErrFundingManagerShuttingDown
			}
		}

		return nil

	// The channel was added to the Router's topology, but the channel
	// announcement was not sent.
	case addedToRouterGraph:
		err := f.annAfterSixConfs(channel, shortChanID)
		if err != nil {
			return fmt.Errorf("error sending channel "+
				"announcement: %v", err)
		}

		// We delete the channel opening state from our internal
		// database as the opening process has succeeded. We can do
		// this because we assume the AuthenticatedGossiper queues the
		// announcement messages, and persists them in case of a daemon
		// shutdown.
		err = f.deleteChannelOpeningState(&channel.FundingOutpoint)
		if err != nil {
			return fmt.Errorf("error deleting channel state: %v",
				err)
		}

		log.Debugf("Channel(%v) with ShortChanID %v: successfully "+
			"announced", chanID, shortChanID)

		return nil
	}

	// A state value not covered above means the persisted record is
	// unknown to this version of the code; surface it rather than guess.
	return fmt.Errorf("undefined channelState: %v", channelState)
}
  1053  
// advancePendingChannelState waits for a pending channel's funding tx to
// confirm, and marks it open in the database when that happens.
//
// If waiting times out (ErrConfirmationTimeout), the channel is closed in
// the database with close type FundingCanceled, a goroutine is launched to
// notify the remote peer of the cancellation once it comes online, and the
// timeout error is returned to the caller.
func (f *Manager) advancePendingChannelState(
	channel *channeldb.OpenChannel, pendingChanID [32]byte) error {

	confChannel, err := f.waitForFundingWithTimeout(channel)
	if err == ErrConfirmationTimeout {
		// We'll get a timeout if the number of blocks mined
		// since the channel was initiated reaches
		// maxWaitNumBlocksFundingConf and we are not the
		// channel initiator.
		ch := channel
		localBalance := ch.LocalCommitment.LocalBalance.ToAtoms()
		closeInfo := &channeldb.ChannelCloseSummary{
			ChainHash:               ch.ChainHash,
			ChanPoint:               ch.FundingOutpoint,
			RemotePub:               ch.IdentityPub,
			Capacity:                ch.Capacity,
			SettledBalance:          localBalance,
			CloseType:               channeldb.FundingCanceled,
			RemoteCurrentRevocation: ch.RemoteCurrentRevocation,
			RemoteNextRevocation:    ch.RemoteNextRevocation,
			LocalChanConfig:         ch.LocalChanCfg,
		}

		// Close the channel with us as the initiator because we are
		// timing the channel out.
		if err := ch.CloseChannel(
			closeInfo, channeldb.ChanStatusLocalCloseInitiator,
		); err != nil {
			return fmt.Errorf("failed closing channel "+
				"%v: %v", ch.FundingOutpoint, err)
		}

		timeoutErr := fmt.Errorf("timeout waiting for funding tx "+
			"(%v) to confirm", channel.FundingOutpoint)

		// When the peer comes online, we'll notify it that we
		// are now considering the channel flow canceled.
		f.wg.Add(1)
		go func() {
			defer f.wg.Done()

			// Buffered so a single notification can be delivered
			// without blocking the notifier.
			peerChan := make(chan lnpeer.Peer, 1)
			var peerKey [33]byte
			copy(peerKey[:], ch.IdentityPub.SerializeCompressed())

			f.cfg.NotifyWhenOnline(peerKey, peerChan)

			// Wait for the peer to come back online, unless the
			// funding manager shuts down first.
			var peer lnpeer.Peer
			select {
			case peer = <-peerChan:
			case <-f.quit:
				return
			}
			// TODO(halseth): should this send be made
			// reliable?
			f.failFundingFlow(peer, pendingChanID, timeoutErr)
		}()

		return timeoutErr

	} else if err != nil {
		return fmt.Errorf("error waiting for funding "+
			"confirmation for ChannelPoint(%v): %v",
			channel.FundingOutpoint, err)
	}

	// Success, funding transaction was confirmed.
	chanID := lnwire.NewChanIDFromOutPoint(&channel.FundingOutpoint)
	log.Debugf("ChannelID(%v) is now fully confirmed! "+
		"(shortChanID=%v)", chanID, confChannel.shortChanID)

	// Hand the confirmed channel off so it can be marked open in the
	// database.
	err = f.handleFundingConfirmation(channel, confChannel)
	if err != nil {
		return fmt.Errorf("unable to handle funding "+
			"confirmation for ChannelPoint(%v): %v",
			channel.FundingOutpoint, err)
	}

	return nil
}
  1136  
  1137  // ProcessFundingMsg sends a message to the internal fundingManager goroutine,
  1138  // allowing it to handle the lnwire.Message.
  1139  func (f *Manager) ProcessFundingMsg(msg lnwire.Message, peer lnpeer.Peer) {
  1140  	select {
  1141  	case f.fundingMsgs <- &fundingMsg{msg, peer}:
  1142  	case <-f.quit:
  1143  		return
  1144  	}
  1145  }
  1146  
  1147  // handleFundingOpen creates an initial 'ChannelReservation' within the wallet,
  1148  // then responds to the source peer with an accept channel message progressing
  1149  // the funding workflow.
  1150  //
  1151  // TODO(roasbeef): add error chan to all, let channelManager handle
  1152  // error+propagate
  1153  func (f *Manager) handleFundingOpen(peer lnpeer.Peer,
  1154  	msg *lnwire.OpenChannel) {
  1155  
  1156  	// Check number of pending channels to be smaller than maximum allowed
  1157  	// number and send ErrorGeneric to remote peer if condition is
  1158  	// violated.
  1159  	peerPubKey := peer.IdentityKey()
  1160  	peerIDKey := newSerializedKey(peerPubKey)
  1161  
  1162  	amt := msg.FundingAmount
  1163  
  1164  	// We get all pending channels for this peer. This is the list of the
  1165  	// active reservations and the channels pending open in the database.
  1166  	f.resMtx.RLock()
  1167  	reservations := f.activeReservations[peerIDKey]
  1168  
  1169  	// We don't count reservations that were created from a canned funding
  1170  	// shim. The user has registered the shim and therefore expects this
  1171  	// channel to arrive.
  1172  	numPending := 0
  1173  	for _, res := range reservations {
  1174  		if !res.reservation.IsCannedShim() {
  1175  			numPending++
  1176  		}
  1177  	}
  1178  	f.resMtx.RUnlock()
  1179  
  1180  	// Also count the channels that are already pending. There we don't know
  1181  	// the underlying intent anymore, unfortunately.
  1182  	channels, err := f.cfg.Wallet.Cfg.Database.FetchOpenChannels(peerPubKey)
  1183  	if err != nil {
  1184  		f.failFundingFlow(
  1185  			peer, msg.PendingChannelID, err,
  1186  		)
  1187  		return
  1188  	}
  1189  
  1190  	for _, c := range channels {
  1191  		// Pending channels that have a non-zero thaw height were also
  1192  		// created through a canned funding shim. Those also don't
  1193  		// count towards the DoS protection limit.
  1194  		//
  1195  		// TODO(guggero): Properly store the funding type (wallet, shim,
  1196  		// PSBT) on the channel so we don't need to use the thaw height.
  1197  		if c.IsPending && c.ThawHeight == 0 {
  1198  			numPending++
  1199  		}
  1200  	}
  1201  
  1202  	// TODO(roasbeef): modify to only accept a _single_ pending channel per
  1203  	// block unless white listed
  1204  	if numPending >= f.cfg.MaxPendingChannels {
  1205  		f.failFundingFlow(
  1206  			peer, msg.PendingChannelID,
  1207  			lnwire.ErrMaxPendingChannels,
  1208  		)
  1209  		return
  1210  	}
  1211  
  1212  	// We'll also reject any requests to create channels until we're fully
  1213  	// synced to the network as we won't be able to properly validate the
  1214  	// confirmation of the funding transaction.
  1215  	isSynced, _, err := f.cfg.Wallet.IsSynced()
  1216  	if err != nil || !isSynced {
  1217  		if err != nil {
  1218  			log.Errorf("unable to query wallet: %v", err)
  1219  		}
  1220  		f.failFundingFlow(
  1221  			peer, msg.PendingChannelID,
  1222  			lnwire.ErrSynchronizingChain,
  1223  		)
  1224  		return
  1225  	}
  1226  
  1227  	// Ensure that the remote party respects our maximum channel size.
  1228  	if amt > f.cfg.MaxChanSize {
  1229  		f.failFundingFlow(
  1230  			peer, msg.PendingChannelID,
  1231  			lnwallet.ErrChanTooLarge(amt, f.cfg.MaxChanSize),
  1232  		)
  1233  		return
  1234  	}
  1235  
  1236  	// We'll, also ensure that the remote party isn't attempting to propose
  1237  	// a channel that's below our current min channel size.
  1238  	if amt < f.cfg.MinChanSize {
  1239  		f.failFundingFlow(
  1240  			peer, msg.PendingChannelID,
  1241  			lnwallet.ErrChanTooSmall(amt, f.cfg.MinChanSize),
  1242  		)
  1243  		return
  1244  	}
  1245  
  1246  	// If request specifies non-zero push amount and 'rejectpush' is set,
  1247  	// signal an error.
  1248  	if f.cfg.RejectPush && msg.PushAmount > 0 {
  1249  		f.failFundingFlow(
  1250  			peer, msg.PendingChannelID,
  1251  			lnwallet.ErrNonZeroPushAmount(),
  1252  		)
  1253  		return
  1254  	}
  1255  
  1256  	// Send the OpenChannel request to the ChannelAcceptor to determine whether
  1257  	// this node will accept the channel.
  1258  	chanReq := &chanacceptor.ChannelAcceptRequest{
  1259  		Node:        peer.IdentityKey(),
  1260  		OpenChanMsg: msg,
  1261  	}
  1262  
  1263  	// Query our channel acceptor to determine whether we should reject
  1264  	// the channel.
  1265  	acceptorResp := f.cfg.OpenChannelPredicate.Accept(chanReq)
  1266  	if acceptorResp.RejectChannel() {
  1267  		f.failFundingFlow(
  1268  			peer, msg.PendingChannelID,
  1269  			acceptorResp.ChanAcceptError,
  1270  		)
  1271  		return
  1272  	}
  1273  
  1274  	log.Infof("Recv'd fundingRequest(amt=%v, push=%v, delay=%v, "+
  1275  		"pendingId=%x) from peer(%x)", amt, msg.PushAmount,
  1276  		msg.CsvDelay, msg.PendingChannelID,
  1277  		peer.IdentityKey().SerializeCompressed())
  1278  
  1279  	// Record the peer address only for outbound connections, since inbound
  1280  	// connections are unlikely to be recoverable from our end.
  1281  	var peerAddr net.Addr
  1282  	if !peer.Inbound() {
  1283  		peerAddr = peer.Address()
  1284  	}
  1285  
  1286  	// Attempt to initialize a reservation within the wallet. If the wallet
  1287  	// has insufficient resources to create the channel, then the
  1288  	// reservation attempt may be rejected. Note that since we're on the
  1289  	// responding side of a single funder workflow, we don't commit any
  1290  	// funds to the channel ourselves.
  1291  	//
  1292  	// Before we init the channel, we'll also check to see what commitment
  1293  	// format we can use with this peer. This is dependent on *both* us and
  1294  	// the remote peer are signaling the proper feature bit if we're using
  1295  	// implicit negotiation, and simply the channel type sent over if we're
  1296  	// using explicit negotiation.
  1297  	wasExplicit, _, commitType, err := negotiateCommitmentType(
  1298  		msg.ChannelType, peer.LocalFeatures(), peer.RemoteFeatures(),
  1299  		false,
  1300  	)
  1301  	if err != nil {
  1302  		// TODO(roasbeef): should be using soft errors
  1303  		log.Errorf("channel type negotiation failed: %v", err)
  1304  		f.failFundingFlow(peer, msg.PendingChannelID, err)
  1305  		return
  1306  	}
  1307  
  1308  	// Only echo back a channel type in AcceptChannel if we actually used
  1309  	// explicit negotiation above.
  1310  	var chanTypeFeatureBits *lnwire.ChannelType
  1311  	if wasExplicit {
  1312  		chanTypeFeatureBits = msg.ChannelType
  1313  	}
  1314  
  1315  	chainHash := msg.ChainHash
  1316  	req := &lnwallet.InitFundingReserveMsg{
  1317  		ChainHash:        &chainHash,
  1318  		PendingChanID:    msg.PendingChannelID,
  1319  		NodeID:           peer.IdentityKey(),
  1320  		NodeAddr:         peerAddr,
  1321  		LocalFundingAmt:  0,
  1322  		RemoteFundingAmt: amt,
  1323  		CommitFeePerKB:   chainfee.AtomPerKByte(msg.FeePerKiloByte),
  1324  		FundingFeePerKB:  0,
  1325  		PushMAtoms:       msg.PushAmount,
  1326  		Flags:            msg.ChannelFlags,
  1327  		MinConfs:         1,
  1328  		CommitType:       commitType,
  1329  	}
  1330  
  1331  	reservation, err := f.cfg.Wallet.InitChannelReservation(req)
  1332  	if err != nil {
  1333  		log.Errorf("Unable to initialize reservation: %v", err)
  1334  		f.failFundingFlow(peer, msg.PendingChannelID, err)
  1335  		return
  1336  	}
  1337  
  1338  	// As we're the responder, we get to specify the number of confirmations
  1339  	// that we require before both of us consider the channel open. We'll
  1340  	// use our mapping to derive the proper number of confirmations based on
  1341  	// the amount of the channel, and also if any funds are being pushed to
  1342  	// us. If a depth value was set by our channel acceptor, we will use
  1343  	// that value instead.
  1344  	numConfsReq := f.cfg.NumRequiredConfs(msg.FundingAmount, msg.PushAmount)
  1345  	if acceptorResp.MinAcceptDepth != 0 {
  1346  		numConfsReq = acceptorResp.MinAcceptDepth
  1347  	}
  1348  	reservation.SetNumConfsRequired(numConfsReq)
  1349  
  1350  	// We'll also validate and apply all the constraints the initiating
  1351  	// party is attempting to dictate for our commitment transaction.
  1352  	channelConstraints := &channeldb.ChannelConstraints{
  1353  		DustLimit:        msg.DustLimit,
  1354  		ChanReserve:      msg.ChannelReserve,
  1355  		MaxPendingAmount: msg.MaxValueInFlight,
  1356  		MinHTLC:          msg.HtlcMinimum,
  1357  		MaxAcceptedHtlcs: msg.MaxAcceptedHTLCs,
  1358  		CsvDelay:         msg.CsvDelay,
  1359  	}
  1360  	err = reservation.CommitConstraints(
  1361  		channelConstraints, f.cfg.MaxLocalCSVDelay, true,
  1362  	)
  1363  	if err != nil {
  1364  		log.Errorf("Unacceptable channel constraints: %v", err)
  1365  		f.failFundingFlow(peer, msg.PendingChannelID, err)
  1366  		return
  1367  	}
  1368  
  1369  	// Check whether the peer supports upfront shutdown, and get a new wallet
  1370  	// address if our node is configured to set shutdown addresses by default.
  1371  	// We use the upfront shutdown script provided by our channel acceptor
  1372  	// (if any) in lieu of user input.
  1373  	shutdown, err := getUpfrontShutdownScript(
  1374  		f.cfg.EnableUpfrontShutdown, peer, acceptorResp.UpfrontShutdown,
  1375  		func() (lnwire.DeliveryAddress, error) {
  1376  			addr, err := f.cfg.Wallet.NewAddress(
  1377  				lnwallet.WitnessPubKey, false,
  1378  				lnwallet.DefaultAccountName,
  1379  			)
  1380  			if err != nil {
  1381  				return nil, err
  1382  			}
  1383  			return input.PayToAddrScript(addr)
  1384  		},
  1385  	)
  1386  	if err != nil {
  1387  		f.failFundingFlow(
  1388  			peer, msg.PendingChannelID,
  1389  			fmt.Errorf("getUpfrontShutdownScript error: %v", err),
  1390  		)
  1391  		return
  1392  	}
  1393  	reservation.SetOurUpfrontShutdown(shutdown)
  1394  
  1395  	// If a script enforced channel lease is being proposed, we'll need to
  1396  	// validate its custom TLV records.
  1397  	if commitType == lnwallet.CommitmentTypeScriptEnforcedLease {
  1398  		if msg.LeaseExpiry == nil {
  1399  			err := errors.New("missing lease expiry")
  1400  			f.failFundingFlow(peer, msg.PendingChannelID, err)
  1401  			return
  1402  		}
  1403  
  1404  		// If we had a shim registered for this channel prior to
  1405  		// receiving its corresponding OpenChannel message, then we'll
  1406  		// validate the proposed LeaseExpiry against what was registered
  1407  		// in our shim.
  1408  		if reservation.LeaseExpiry() != 0 {
  1409  			if uint32(*msg.LeaseExpiry) != reservation.LeaseExpiry() {
  1410  				err := errors.New("lease expiry mismatch")
  1411  				f.failFundingFlow(peer, msg.PendingChannelID, err)
  1412  				return
  1413  			}
  1414  		}
  1415  	}
  1416  
  1417  	log.Infof("Requiring %v confirmations for pendingChan(%x): "+
  1418  		"amt=%v, push_amt=%v, committype=%v, upfrontShutdown=%x", numConfsReq,
  1419  		msg.PendingChannelID, amt, msg.PushAmount,
  1420  		commitType, msg.UpfrontShutdownScript)
  1421  
  1422  	// Generate our required constraints for the remote party, using the
  1423  	// values provided by the channel acceptor if they are non-zero.
  1424  	remoteCsvDelay := f.cfg.RequiredRemoteDelay(amt)
  1425  	if acceptorResp.CSVDelay != 0 {
  1426  		remoteCsvDelay = acceptorResp.CSVDelay
  1427  	}
  1428  
  1429  	// If our default dust limit was above their ChannelReserve, we change
  1430  	// it to the ChannelReserve. We must make sure the ChannelReserve we
  1431  	// send in the AcceptChannel message is above both dust limits.
  1432  	// Therefore, take the maximum of msg.DustLimit and our dust limit.
  1433  	//
  1434  	// NOTE: Even with this bounding, the ChannelAcceptor may return an
  1435  	// BOLT#02-invalid ChannelReserve.
  1436  	maxDustLimit := reservation.OurContribution().DustLimit
  1437  	if msg.DustLimit > maxDustLimit {
  1438  		maxDustLimit = msg.DustLimit
  1439  	}
  1440  
  1441  	chanReserve := f.cfg.RequiredRemoteChanReserve(amt, maxDustLimit)
  1442  	if acceptorResp.Reserve != 0 {
  1443  		chanReserve = acceptorResp.Reserve
  1444  	}
  1445  
  1446  	remoteMaxValue := f.cfg.RequiredRemoteMaxValue(amt)
  1447  	if acceptorResp.InFlightTotal != 0 {
  1448  		remoteMaxValue = acceptorResp.InFlightTotal
  1449  	}
  1450  
  1451  	maxHtlcs := f.cfg.RequiredRemoteMaxHTLCs(amt)
  1452  	if acceptorResp.HtlcLimit != 0 {
  1453  		maxHtlcs = acceptorResp.HtlcLimit
  1454  	}
  1455  
  1456  	// Default to our default minimum hltc value, replacing it with the
  1457  	// channel acceptor's value if it is set.
  1458  	minHtlc := f.cfg.DefaultMinHtlcIn
  1459  	if acceptorResp.MinHtlcIn != 0 {
  1460  		minHtlc = acceptorResp.MinHtlcIn
  1461  	}
  1462  
  1463  	// Once the reservation has been created successfully, we add it to
  1464  	// this peer's map of pending reservations to track this particular
  1465  	// reservation until either abort or completion.
  1466  	f.resMtx.Lock()
  1467  	if _, ok := f.activeReservations[peerIDKey]; !ok {
  1468  		f.activeReservations[peerIDKey] = make(pendingChannels)
  1469  	}
  1470  	resCtx := &reservationWithCtx{
  1471  		reservation:    reservation,
  1472  		chanAmt:        amt,
  1473  		remoteCsvDelay: remoteCsvDelay,
  1474  		remoteMinHtlc:  minHtlc,
  1475  		remoteMaxValue: remoteMaxValue,
  1476  		remoteMaxHtlcs: maxHtlcs,
  1477  		maxLocalCsv:    f.cfg.MaxLocalCSVDelay,
  1478  		channelType:    msg.ChannelType,
  1479  		err:            make(chan error, 1),
  1480  		peer:           peer,
  1481  	}
  1482  	f.activeReservations[peerIDKey][msg.PendingChannelID] = resCtx
  1483  	f.resMtx.Unlock()
  1484  
  1485  	// Update the timestamp once the fundingOpenMsg has been handled.
  1486  	defer resCtx.updateTimestamp()
  1487  
  1488  	// With our parameters set, we'll now process their contribution so we
  1489  	// can move the funding workflow ahead.
  1490  	remoteContribution := &lnwallet.ChannelContribution{
  1491  		FundingAmount:        amt,
  1492  		FirstCommitmentPoint: msg.FirstCommitmentPoint,
  1493  		ChannelConfig: &channeldb.ChannelConfig{
  1494  			ChannelConstraints: channeldb.ChannelConstraints{
  1495  				DustLimit:        msg.DustLimit,
  1496  				MaxPendingAmount: remoteMaxValue,
  1497  				ChanReserve:      chanReserve,
  1498  				MinHTLC:          minHtlc,
  1499  				MaxAcceptedHtlcs: maxHtlcs,
  1500  				CsvDelay:         remoteCsvDelay,
  1501  			},
  1502  			MultiSigKey: keychain.KeyDescriptor{
  1503  				PubKey: copyPubKey(msg.FundingKey),
  1504  			},
  1505  			RevocationBasePoint: keychain.KeyDescriptor{
  1506  				PubKey: copyPubKey(msg.RevocationPoint),
  1507  			},
  1508  			PaymentBasePoint: keychain.KeyDescriptor{
  1509  				PubKey: copyPubKey(msg.PaymentPoint),
  1510  			},
  1511  			DelayBasePoint: keychain.KeyDescriptor{
  1512  				PubKey: copyPubKey(msg.DelayedPaymentPoint),
  1513  			},
  1514  			HtlcBasePoint: keychain.KeyDescriptor{
  1515  				PubKey: copyPubKey(msg.HtlcPoint),
  1516  			},
  1517  		},
  1518  		UpfrontShutdown: msg.UpfrontShutdownScript,
  1519  	}
  1520  	err = reservation.ProcessSingleContribution(remoteContribution)
  1521  	if err != nil {
  1522  		log.Errorf("unable to add contribution reservation: %v", err)
  1523  		f.failFundingFlow(peer, msg.PendingChannelID, err)
  1524  		return
  1525  	}
  1526  
  1527  	log.Infof("Sending fundingResp for pending_id(%x)",
  1528  		msg.PendingChannelID)
  1529  	log.Debugf("Remote party accepted commitment constraints: %v",
  1530  		spew.Sdump(remoteContribution.ChannelConfig.ChannelConstraints))
  1531  
  1532  	// With the initiator's contribution recorded, respond with our
  1533  	// contribution in the next message of the workflow.
  1534  	ourContribution := reservation.OurContribution()
  1535  	fundingAccept := lnwire.AcceptChannel{
  1536  		PendingChannelID:      msg.PendingChannelID,
  1537  		DustLimit:             ourContribution.DustLimit,
  1538  		MaxValueInFlight:      remoteMaxValue,
  1539  		ChannelReserve:        chanReserve,
  1540  		MinAcceptDepth:        uint32(numConfsReq),
  1541  		HtlcMinimum:           minHtlc,
  1542  		CsvDelay:              remoteCsvDelay,
  1543  		MaxAcceptedHTLCs:      maxHtlcs,
  1544  		FundingKey:            ourContribution.MultiSigKey.PubKey,
  1545  		RevocationPoint:       ourContribution.RevocationBasePoint.PubKey,
  1546  		PaymentPoint:          ourContribution.PaymentBasePoint.PubKey,
  1547  		DelayedPaymentPoint:   ourContribution.DelayBasePoint.PubKey,
  1548  		HtlcPoint:             ourContribution.HtlcBasePoint.PubKey,
  1549  		FirstCommitmentPoint:  ourContribution.FirstCommitmentPoint,
  1550  		UpfrontShutdownScript: ourContribution.UpfrontShutdown,
  1551  		ChannelType:           chanTypeFeatureBits,
  1552  		LeaseExpiry:           msg.LeaseExpiry,
  1553  	}
  1554  
  1555  	if err := peer.SendMessage(true, &fundingAccept); err != nil {
  1556  		log.Errorf("unable to send funding response to peer: %v", err)
  1557  		f.failFundingFlow(peer, msg.PendingChannelID, err)
  1558  		return
  1559  	}
  1560  }
  1561  
// handleFundingAccept processes a response (accept_channel) to the workflow
// initiation sent by the remote peer. After validating the echoed channel
// type, lease expiry and commitment constraints, it records the remote
// party's contribution and queues a message with the funding outpoint, and a
// commitment signature to the remote peer. If the reservation requires PSBT
// funding, the flow instead pauses in a goroutine until the signed PSBT
// arrives.
func (f *Manager) handleFundingAccept(peer lnpeer.Peer,
	msg *lnwire.AcceptChannel) {

	pendingChanID := msg.PendingChannelID
	peerKey := peer.IdentityKey()

	// Look up the in-memory reservation created when we sent the
	// open_channel message. Without it there is nothing to advance, so we
	// simply return.
	resCtx, err := f.getReservationCtx(peerKey, pendingChanID)
	if err != nil {
		log.Warnf("Can't find reservation (peerKey:%v, chan_id:%v)",
			peerKey, pendingChanID)
		return
	}

	// Update the timestamp once the fundingAcceptMsg has been handled.
	defer resCtx.updateTimestamp()

	log.Infof("Recv'd fundingResponse for pending_id(%x)",
		pendingChanID[:])

	// Perform some basic validation of any custom TLV records included.
	//
	// TODO: Return errors as funding.Error to give context to remote peer?
	if resCtx.channelType != nil {
		// We'll want to quickly check that the ChannelType echoed by
		// the channel request recipient matches what we proposed.
		if msg.ChannelType == nil {
			err := errors.New("explicit channel type not echoed back")
			f.failFundingFlow(peer, msg.PendingChannelID, err)
			return
		}
		proposedFeatures := lnwire.RawFeatureVector(*resCtx.channelType)
		ackedFeatures := lnwire.RawFeatureVector(*msg.ChannelType)
		if !proposedFeatures.Equals(&ackedFeatures) {
			err := errors.New("channel type mismatch")
			f.failFundingFlow(peer, msg.PendingChannelID, err)
			return
		}

		// We'll want to do the same with the LeaseExpiry if one should
		// be set.
		if resCtx.reservation.LeaseExpiry() != 0 {
			if msg.LeaseExpiry == nil {
				err := errors.New("lease expiry not echoed back")
				f.failFundingFlow(peer, msg.PendingChannelID, err)
				return
			}
			if uint32(*msg.LeaseExpiry) != resCtx.reservation.LeaseExpiry() {
				err := errors.New("lease expiry mismatch")
				f.failFundingFlow(peer, msg.PendingChannelID, err)
				return
			}
		}
	} else if msg.ChannelType != nil {
		// The spec isn't too clear about whether it's okay to set the
		// channel type in the accept_channel response if we didn't
		// explicitly set it in the open_channel message. For now, let's
		// just log the problem instead of failing the funding flow.
		_, implicitChannelType := implicitNegotiateCommitmentType(
			peer.LocalFeatures(), peer.RemoteFeatures(),
		)

		// We pass in false here as the funder since at this point, we
		// didn't set a chan type ourselves, so falling back to
		// implicit funding is acceptable.
		_, _, negotiatedChannelType, err := negotiateCommitmentType(
			msg.ChannelType, peer.LocalFeatures(),
			peer.RemoteFeatures(), false,
		)
		if err != nil {
			err := errors.New("received unexpected channel type")
			f.failFundingFlow(peer, msg.PendingChannelID, err)
			return
		}

		// Even though we don't expect a channel type to be set when we
		// didn't send one in the first place, we check that it's the
		// same type we'd have arrived through implicit negotiation. If
		// it's another type, we fail the flow.
		if implicitChannelType != negotiatedChannelType {
			err := errors.New("negotiated unexpected channel type")
			f.failFundingFlow(peer, msg.PendingChannelID, err)
			return
		}
	}

	// The required number of confirmations should not be greater than the
	// maximum number of confirmations required by the ChainNotifier to
	// properly dispatch confirmations.
	if msg.MinAcceptDepth > chainntnfs.MaxNumConfs {
		err := lnwallet.ErrNumConfsTooLarge(
			msg.MinAcceptDepth, chainntnfs.MaxNumConfs,
		)
		log.Warnf("Unacceptable channel constraints: %v", err)
		f.failFundingFlow(peer, msg.PendingChannelID, err)
		return
	}

	// We'll also specify the responder's preference for the number of
	// required confirmations, and also the set of channel constraints
	// they've specified for commitment states we can create.
	resCtx.reservation.SetNumConfsRequired(uint16(msg.MinAcceptDepth))
	channelConstraints := &channeldb.ChannelConstraints{
		DustLimit:        msg.DustLimit,
		ChanReserve:      msg.ChannelReserve,
		MaxPendingAmount: msg.MaxValueInFlight,
		MinHTLC:          msg.HtlcMinimum,
		MaxAcceptedHtlcs: msg.MaxAcceptedHTLCs,
		CsvDelay:         msg.CsvDelay,
	}
	err = resCtx.reservation.CommitConstraints(
		channelConstraints, resCtx.maxLocalCsv, false,
	)
	if err != nil {
		log.Warnf("Unacceptable channel constraints: %v", err)
		f.failFundingFlow(peer, msg.PendingChannelID, err)
		return
	}

	// As they've accepted our channel constraints, we'll regenerate them
	// here so we can properly commit their accepted constraints to the
	// reservation. Also make sure that we re-generate the ChannelReserve
	// with our dust limit or we can get stuck channels.
	chanReserve := f.cfg.RequiredRemoteChanReserve(
		resCtx.chanAmt, resCtx.reservation.OurContribution().DustLimit,
	)

	// The remote node has responded with their portion of the channel
	// contribution. At this point, we can process their contribution which
	// allows us to construct and sign both the commitment transaction, and
	// the funding transaction.
	remoteContribution := &lnwallet.ChannelContribution{
		FirstCommitmentPoint: msg.FirstCommitmentPoint,
		ChannelConfig: &channeldb.ChannelConfig{
			ChannelConstraints: channeldb.ChannelConstraints{
				DustLimit:        msg.DustLimit,
				MaxPendingAmount: resCtx.remoteMaxValue,
				ChanReserve:      chanReserve,
				MinHTLC:          resCtx.remoteMinHtlc,
				MaxAcceptedHtlcs: resCtx.remoteMaxHtlcs,
				CsvDelay:         resCtx.remoteCsvDelay,
			},
			MultiSigKey: keychain.KeyDescriptor{
				PubKey: copyPubKey(msg.FundingKey),
			},
			RevocationBasePoint: keychain.KeyDescriptor{
				PubKey: copyPubKey(msg.RevocationPoint),
			},
			PaymentBasePoint: keychain.KeyDescriptor{
				PubKey: copyPubKey(msg.PaymentPoint),
			},
			DelayBasePoint: keychain.KeyDescriptor{
				PubKey: copyPubKey(msg.DelayedPaymentPoint),
			},
			HtlcBasePoint: keychain.KeyDescriptor{
				PubKey: copyPubKey(msg.HtlcPoint),
			},
		},
		UpfrontShutdown: msg.UpfrontShutdownScript,
	}
	// Note: for PSBT funding, ProcessContribution intentionally returns a
	// *lnwallet.PsbtFundingRequired error, which is handled below rather
	// than treated as a failure.
	err = resCtx.reservation.ProcessContribution(remoteContribution)

	// The wallet has detected that a PSBT funding process was requested by
	// the user and has halted the funding process after negotiating the
	// multisig keys. We now have everything that is needed for the user to
	// start constructing a PSBT that sends to the multisig funding address.
	var psbtIntent *chanfunding.PsbtIntent
	if psbtErr, ok := err.(*lnwallet.PsbtFundingRequired); ok {
		// Return the information that is needed by the user to
		// construct the PSBT back to the caller.
		addr, amt, packet, err := psbtErr.Intent.FundingParams()
		if err != nil {
			log.Errorf("Unable to process PSBT funding params "+
				"for contribution from %v: %v", peerKey, err)
			f.failFundingFlow(peer, msg.PendingChannelID, err)
			return
		}
		var buf bytes.Buffer
		err = packet.Serialize(&buf)
		if err != nil {
			log.Errorf("Unable to serialize PSBT for "+
				"contribution from %v: %v", peerKey, err)
			f.failFundingFlow(peer, msg.PendingChannelID, err)
			return
		}
		resCtx.updates <- &lnrpc.OpenStatusUpdate{
			PendingChanId: pendingChanID[:],
			Update: &lnrpc.OpenStatusUpdate_PsbtFund{
				PsbtFund: &lnrpc.ReadyForPsbtFunding{
					FundingAddress: addr.String(),
					FundingAmount:  amt,
					Psbt:           buf.Bytes(),
				},
			},
		}
		psbtIntent = psbtErr.Intent
	} else if err != nil {
		log.Errorf("Unable to process contribution from %v: %v",
			peerKey, err)
		f.failFundingFlow(peer, msg.PendingChannelID, err)
		return
	}

	log.Infof("pendingChan(%x): remote party proposes num_confs=%v, "+
		"csv_delay=%v", pendingChanID[:], msg.MinAcceptDepth, msg.CsvDelay)
	log.Debugf("Remote party accepted commitment constraints: %v",
		spew.Sdump(remoteContribution.ChannelConfig.ChannelConstraints))

	// If the user requested funding through a PSBT, we cannot directly
	// continue now and need to wait for the fully funded and signed PSBT
	// to arrive. To not block any other channels from opening, we wait in
	// a separate goroutine.
	if psbtIntent != nil {
		f.wg.Add(1)
		go func() {
			defer f.wg.Done()
			f.waitForPsbt(psbtIntent, resCtx, pendingChanID)
		}()

		// With the new goroutine spawned, we can now exit to unblock
		// the main event loop.
		return
	}

	// In a normal, non-PSBT funding flow, we can jump directly to the next
	// step where we expect our contribution to be finalized.
	f.continueFundingAccept(resCtx, pendingChanID)
}
  1792  
// waitForPsbt blocks until either a signed PSBT arrives, an error occurs or
// the funding manager shuts down. In the case of a valid PSBT, the funding flow
// is continued.
//
// NOTE: This method must be called as a goroutine.
func (f *Manager) waitForPsbt(intent *chanfunding.PsbtIntent,
	resCtx *reservationWithCtx, pendingChanID [32]byte) {

	// failFlow is a helper that logs an error message with the current
	// context and then fails the funding flow.
	peerKey := resCtx.peer.IdentityKey()
	failFlow := func(errMsg string, cause error) {
		log.Errorf("Unable to handle funding accept message "+
			"for peer_key=%x, pending_chan_id=%x: %s: %v",
			peerKey.SerializeCompressed(), pendingChanID, errMsg,
			cause)
		f.failFundingFlow(resCtx.peer, pendingChanID, cause)
	}

	// We'll now wait until the intent has received the final and complete
	// funding transaction. If the channel is closed without any error being
	// sent, we know everything's going as expected.
	select {
	case err := <-intent.PsbtReady:
		switch err {
		// If the user canceled the funding reservation, we need to
		// inform the other peer about us canceling the reservation.
		case chanfunding.ErrUserCanceled:
			failFlow("aborting PSBT flow", err)
			return

		// If the remote canceled the funding reservation, we don't need
		// to send another fail message. But we want to inform the user
		// about what happened.
		case chanfunding.ErrRemoteCanceled:
			log.Infof("Remote canceled, aborting PSBT flow "+
				"for peer_key=%x, pending_chan_id=%x",
				peerKey.SerializeCompressed(), pendingChanID)
			return

		// Nil error means the flow continues normally now.
		case nil:

		// For any other error, we'll fail the funding flow.
		default:
			failFlow("error waiting for PSBT flow", err)
			return
		}

		// At this point the error is nil, so the funding flow can
		// continue. Notify the wallet so it can prepare everything we
		// need to continue.
		err = resCtx.reservation.ProcessPsbt()
		if err != nil {
			failFlow("error continuing PSBT flow", err)
			return
		}

		// We are now ready to continue the funding flow.
		f.continueFundingAccept(resCtx, pendingChanID)

	// Handle a server shutdown as well because the reservation won't
	// survive a restart as it's in memory only.
	case <-f.quit:
		log.Errorf("Unable to handle funding accept message "+
			"for peer_key=%x, pending_chan_id=%x: funding manager "+
			"shutting down", peerKey.SerializeCompressed(),
			pendingChanID)
		return
	}
}
  1864  
  1865  // continueFundingAccept continues the channel funding flow once our
  1866  // contribution is finalized, the channel output is known and the funding
  1867  // transaction is signed.
  1868  func (f *Manager) continueFundingAccept(resCtx *reservationWithCtx,
  1869  	pendingChanID [32]byte) {
  1870  
  1871  	// Now that we have their contribution, we can extract, then send over
  1872  	// both the funding out point and our signature for their version of
  1873  	// the commitment transaction to the remote peer.
  1874  	outPoint := resCtx.reservation.FundingOutpoint()
  1875  	_, sig := resCtx.reservation.OurSignatures()
  1876  
  1877  	// A new channel has almost finished the funding process. In order to
  1878  	// properly synchronize with the writeHandler goroutine, we add a new
  1879  	// channel to the barriers map which will be closed once the channel is
  1880  	// fully open.
  1881  	f.barrierMtx.Lock()
  1882  	channelID := lnwire.NewChanIDFromOutPoint(outPoint)
  1883  	log.Debugf("Creating chan barrier for ChanID(%v)", channelID)
  1884  	f.newChanBarriers[channelID] = make(chan struct{})
  1885  	f.barrierMtx.Unlock()
  1886  
  1887  	// The next message that advances the funding flow will reference the
  1888  	// channel via its permanent channel ID, so we'll set up this mapping
  1889  	// so we can retrieve the reservation context once we get the
  1890  	// FundingSigned message.
  1891  	f.resMtx.Lock()
  1892  	f.signedReservations[channelID] = pendingChanID
  1893  	f.resMtx.Unlock()
  1894  
  1895  	log.Infof("Generated ChannelPoint(%v) for pending_id(%x)", outPoint,
  1896  		pendingChanID[:])
  1897  
  1898  	var err error
  1899  	fundingCreated := &lnwire.FundingCreated{
  1900  		PendingChannelID: pendingChanID,
  1901  		FundingPoint:     *outPoint,
  1902  	}
  1903  	fundingCreated.CommitSig, err = lnwire.NewSigFromSignature(sig)
  1904  	if err != nil {
  1905  		log.Errorf("Unable to parse signature: %v", err)
  1906  		f.failFundingFlow(resCtx.peer, pendingChanID, err)
  1907  		return
  1908  	}
  1909  	if err := resCtx.peer.SendMessage(true, fundingCreated); err != nil {
  1910  		log.Errorf("Unable to send funding complete message: %v", err)
  1911  		f.failFundingFlow(resCtx.peer, pendingChanID, err)
  1912  		return
  1913  	}
  1914  }
  1915  
// handleFundingCreated progresses the funding workflow when the daemon is on
// the responding side of a single funder workflow. Once this message has been
// processed, a signature is sent to the remote peer allowing it to broadcast
// the funding transaction, progressing the workflow into the final stage. The
// channel is persisted as pending before our FundingSigned message goes out,
// so the flow can resume across restarts.
func (f *Manager) handleFundingCreated(peer lnpeer.Peer,
	msg *lnwire.FundingCreated) {

	peerKey := peer.IdentityKey()
	pendingChanID := msg.PendingChannelID

	// Locate the reservation set up when we responded with accept_channel;
	// without it the message cannot be matched to a pending flow.
	resCtx, err := f.getReservationCtx(peerKey, pendingChanID)
	if err != nil {
		log.Warnf("can't find reservation (peer_id:%v, chan_id:%x)",
			peerKey, pendingChanID[:])
		return
	}

	// The channel initiator has responded with the funding outpoint of the
	// final funding transaction, as well as a signature for our version of
	// the commitment transaction. So at this point, we can validate the
	// initiator's commitment transaction, then send our own if it's valid.
	// TODO(roasbeef): make case (p vs P) consistent throughout
	fundingOut := msg.FundingPoint
	log.Infof("completing pending_id(%x) with ChannelPoint(%v)",
		pendingChanID[:], fundingOut)

	commitSig, err := msg.CommitSig.ToSignature()
	if err != nil {
		log.Errorf("unable to parse signature: %v", err)
		f.failFundingFlow(peer, pendingChanID, err)
		return
	}

	// With all the necessary data available, attempt to advance the
	// funding workflow to the next stage. If this succeeds then the
	// funding transaction will broadcast after our next message.
	// CompleteReservationSingle will also mark the channel as 'IsPending'
	// in the database.
	completeChan, err := resCtx.reservation.CompleteReservationSingle(
		&fundingOut, commitSig,
	)
	if err != nil {
		// TODO(roasbeef): better error logging: peerID, channelID, etc.
		log.Errorf("unable to complete single reservation: %v", err)
		f.failFundingFlow(peer, pendingChanID, err)
		return
	}

	// The channel is marked IsPending in the database, and can be removed
	// from the set of active reservations.
	f.deleteReservationCtx(peerKey, msg.PendingChannelID)

	// If something goes wrong before the funding transaction is confirmed,
	// we use this convenience method to delete the pending OpenChannel
	// from the database.
	deleteFromDatabase := func() {
		localBalance := completeChan.LocalCommitment.LocalBalance.ToAtoms()
		closeInfo := &channeldb.ChannelCloseSummary{
			ChanPoint:               completeChan.FundingOutpoint,
			ChainHash:               completeChan.ChainHash,
			RemotePub:               completeChan.IdentityPub,
			CloseType:               channeldb.FundingCanceled,
			Capacity:                completeChan.Capacity,
			SettledBalance:          localBalance,
			RemoteCurrentRevocation: completeChan.RemoteCurrentRevocation,
			RemoteNextRevocation:    completeChan.RemoteNextRevocation,
			LocalChanConfig:         completeChan.LocalChanCfg,
		}

		// Close the channel with us as the initiator because we are
		// deciding to exit the funding flow due to an internal error.
		if err := completeChan.CloseChannel(
			closeInfo, channeldb.ChanStatusLocalCloseInitiator,
		); err != nil {
			log.Errorf("Failed closing channel %v: %v",
				completeChan.FundingOutpoint, err)
		}
	}

	// A new channel has almost finished the funding process. In order to
	// properly synchronize with the writeHandler goroutine, we add a new
	// channel to the barriers map which will be closed once the channel is
	// fully open.
	f.barrierMtx.Lock()
	channelID := lnwire.NewChanIDFromOutPoint(&fundingOut)
	log.Debugf("Creating chan barrier for ChanID(%v)", channelID)
	f.newChanBarriers[channelID] = make(chan struct{})
	f.barrierMtx.Unlock()

	log.Infof("sending FundingSigned for pending_id(%x) over "+
		"ChannelPoint(%v)", pendingChanID[:], fundingOut)

	// With their signature for our version of the commitment transaction
	// verified, we can now send over our signature to the remote peer.
	_, sig := resCtx.reservation.OurSignatures()
	ourCommitSig, err := lnwire.NewSigFromSignature(sig)
	if err != nil {
		log.Errorf("unable to parse signature: %v", err)
		f.failFundingFlow(peer, pendingChanID, err)
		deleteFromDatabase()
		return
	}

	fundingSigned := &lnwire.FundingSigned{
		ChanID:    channelID,
		CommitSig: ourCommitSig,
	}
	if err := peer.SendMessage(true, fundingSigned); err != nil {
		log.Errorf("unable to send FundingSigned message: %v", err)
		f.failFundingFlow(peer, pendingChanID, err)
		deleteFromDatabase()
		return
	}

	// Now that we've sent over our final signature for this channel, we'll
	// send it to the ChainArbitrator so it can watch for any on-chain
	// actions during this final confirmation stage.
	if err := f.cfg.WatchNewChannel(completeChan, peerKey); err != nil {
		log.Errorf("Unable to send new ChannelPoint(%v) for "+
			"arbitration: %v", fundingOut, err)
	}

	// Create an entry in the local discovery map so we can ensure that we
	// process the channel confirmation fully before we receive a funding
	// locked message.
	f.localDiscoveryMtx.Lock()
	f.localDiscoverySignals[channelID] = make(chan struct{})
	f.localDiscoveryMtx.Unlock()

	// Inform the ChannelNotifier that the channel has entered
	// pending open state.
	f.cfg.NotifyPendingOpenChannelEvent(fundingOut, completeChan)

	// At this point we have sent our last funding message to the
	// initiating peer before the funding transaction will be broadcast.
	// With this last message, our job as the responder is now complete.
	// We'll wait for the funding transaction to reach the specified number
	// of confirmations, then start normal operations.
	//
	// When we get to this point we have sent the signComplete message to
	// the channel funder, and BOLT#2 specifies that we MUST remember the
	// channel for reconnection. The channel is already marked
	// as pending in the database, so in case of a disconnect or restart,
	// we will continue waiting for the confirmation the next time we start
	// the funding manager. In case the funding transaction never appears
	// on the blockchain, we must forget this channel. We therefore
	// completely forget about this channel if we haven't seen the funding
	// transaction in 288 blocks (~ 48 hrs), by canceling the reservation
	// and canceling the wait for the funding confirmation.
	f.wg.Add(1)
	go f.advanceFundingState(completeChan, pendingChanID, nil)
}
  2068  
  2069  // handleFundingSigned processes the final message received in a single funder
  2070  // workflow. Once this message is processed, the funding transaction is
  2071  // broadcast. Once the funding transaction reaches a sufficient number of
  2072  // confirmations, a message is sent to the responding peer along with a compact
  2073  // encoding of the location of the channel within the blockchain.
  2074  func (f *Manager) handleFundingSigned(peer lnpeer.Peer,
  2075  	msg *lnwire.FundingSigned) {
  2076  
  2077  	// As the funding signed message will reference the reservation by its
  2078  	// permanent channel ID, we'll need to perform an intermediate look up
  2079  	// before we can obtain the reservation.
  2080  	f.resMtx.Lock()
  2081  	pendingChanID, ok := f.signedReservations[msg.ChanID]
  2082  	delete(f.signedReservations, msg.ChanID)
  2083  	f.resMtx.Unlock()
  2084  	if !ok {
  2085  		err := fmt.Errorf("unable to find signed reservation for "+
  2086  			"chan_id=%x", msg.ChanID)
  2087  		log.Warnf(err.Error())
  2088  		f.failFundingFlow(peer, msg.ChanID, err)
  2089  		return
  2090  	}
  2091  
  2092  	peerKey := peer.IdentityKey()
  2093  	resCtx, err := f.getReservationCtx(peerKey, pendingChanID)
  2094  	if err != nil {
  2095  		log.Warnf("Unable to find reservation (peer_id:%v, "+
  2096  			"chan_id:%x)", peerKey, pendingChanID[:])
  2097  		// TODO: add ErrChanNotFound?
  2098  		f.failFundingFlow(peer, pendingChanID, err)
  2099  		return
  2100  	}
  2101  
  2102  	// Create an entry in the local discovery map so we can ensure that we
  2103  	// process the channel confirmation fully before we receive a funding
  2104  	// locked message.
  2105  	fundingPoint := resCtx.reservation.FundingOutpoint()
  2106  	permChanID := lnwire.NewChanIDFromOutPoint(fundingPoint)
  2107  	f.localDiscoveryMtx.Lock()
  2108  	f.localDiscoverySignals[permChanID] = make(chan struct{})
  2109  	f.localDiscoveryMtx.Unlock()
  2110  
  2111  	// The remote peer has responded with a signature for our commitment
  2112  	// transaction. We'll verify the signature for validity, then commit
  2113  	// the state to disk as we can now open the channel.
  2114  	commitSig, err := msg.CommitSig.ToSignature()
  2115  	if err != nil {
  2116  		log.Errorf("Unable to parse signature: %v", err)
  2117  		f.failFundingFlow(peer, pendingChanID, err)
  2118  		return
  2119  	}
  2120  
  2121  	completeChan, err := resCtx.reservation.CompleteReservation(
  2122  		nil, commitSig,
  2123  	)
  2124  	if err != nil {
  2125  		log.Errorf("Unable to complete reservation sign "+
  2126  			"complete: %v", err)
  2127  		f.failFundingFlow(peer, pendingChanID, err)
  2128  		return
  2129  	}
  2130  
  2131  	// The channel is now marked IsPending in the database, and we can
  2132  	// delete it from our set of active reservations.
  2133  	f.deleteReservationCtx(peerKey, pendingChanID)
  2134  
  2135  	// Broadcast the finalized funding transaction to the network, but only
  2136  	// if we actually have the funding transaction.
  2137  	if completeChan.ChanType.HasFundingTx() {
  2138  		fundingTx := completeChan.FundingTxn
  2139  		var fundingTxBuf bytes.Buffer
  2140  		if err := fundingTx.Serialize(&fundingTxBuf); err != nil {
  2141  			log.Errorf("Unable to serialize funding "+
  2142  				"transaction %v: %v", fundingTx.TxHash(), err)
  2143  
  2144  			// Clear the buffer of any bytes that were written
  2145  			// before the serialization error to prevent logging an
  2146  			// incomplete transaction.
  2147  			fundingTxBuf.Reset()
  2148  		}
  2149  
  2150  		log.Infof("Broadcasting funding tx for ChannelPoint(%v): %x",
  2151  			completeChan.FundingOutpoint, fundingTxBuf.Bytes())
  2152  
  2153  		// Set a nil short channel ID at this stage because we do not
  2154  		// know it until our funding tx confirms.
  2155  		label := labels.MakeLabel(
  2156  			labels.LabelTypeChannelOpen, nil,
  2157  		)
  2158  
  2159  		err = f.cfg.PublishTransaction(fundingTx, label)
  2160  		if err != nil {
  2161  			log.Errorf("Unable to broadcast funding tx %x for "+
  2162  				"ChannelPoint(%v): %v", fundingTxBuf.Bytes(),
  2163  				completeChan.FundingOutpoint, err)
  2164  
  2165  			// We failed to broadcast the funding transaction, but
  2166  			// watch the channel regardless, in case the
  2167  			// transaction made it to the network. We will retry
  2168  			// broadcast at startup.
  2169  			//
  2170  			// TODO(halseth): retry more often? Handle with CPFP?
  2171  			// Just delete from the DB?
  2172  		}
  2173  	}
  2174  
  2175  	// Now that we have a finalized reservation for this funding flow,
  2176  	// we'll send the to be active channel to the ChainArbitrator so it can
  2177  	// watch for any on-chain actions before the channel has fully
  2178  	// confirmed.
  2179  	if err := f.cfg.WatchNewChannel(completeChan, peerKey); err != nil {
  2180  		log.Errorf("Unable to send new ChannelPoint(%v) for "+
  2181  			"arbitration: %v", fundingPoint, err)
  2182  	}
  2183  
  2184  	log.Infof("Finalizing pending_id(%x) over ChannelPoint(%v), "+
  2185  		"waiting for channel open on-chain", pendingChanID[:],
  2186  		fundingPoint)
  2187  
  2188  	// Send an update to the upstream client that the negotiation process
  2189  	// is over.
  2190  	//
  2191  	// TODO(roasbeef): add abstraction over updates to accommodate
  2192  	// long-polling, or SSE, etc.
  2193  	upd := &lnrpc.OpenStatusUpdate{
  2194  		Update: &lnrpc.OpenStatusUpdate_ChanPending{
  2195  			ChanPending: &lnrpc.PendingUpdate{
  2196  				Txid:        fundingPoint.Hash[:],
  2197  				OutputIndex: fundingPoint.Index,
  2198  			},
  2199  		},
  2200  		PendingChanId: pendingChanID[:],
  2201  	}
  2202  
  2203  	select {
  2204  	case resCtx.updates <- upd:
  2205  		// Inform the ChannelNotifier that the channel has entered
  2206  		// pending open state.
  2207  		f.cfg.NotifyPendingOpenChannelEvent(*fundingPoint, completeChan)
  2208  	case <-f.quit:
  2209  		return
  2210  	}
  2211  
  2212  	// At this point we have broadcast the funding transaction and done all
  2213  	// necessary processing.
  2214  	f.wg.Add(1)
  2215  	go f.advanceFundingState(completeChan, pendingChanID, resCtx.updates)
  2216  }
  2217  
// confirmedChannel wraps a confirmed funding transaction, as well as the short
// channel ID which identifies that channel into a single struct. We'll use
// this to pass around the final state of a channel after it has been
// confirmed.
type confirmedChannel struct {
	// shortChanID expresses where in the chain the funding transaction
	// was located once it confirmed.
	shortChanID lnwire.ShortChannelID

	// fundingTx is the funding transaction that created the channel.
	fundingTx *wire.MsgTx
}
  2230  
  2231  // waitForFundingWithTimeout is a wrapper around waitForFundingConfirmation and
  2232  // waitForTimeout that will return ErrConfirmationTimeout if we are not the
  2233  // channel initiator and the maxWaitNumBlocksFundingConf has passed from the
  2234  // funding broadcast height. In case of confirmation, the short channel ID of
  2235  // the channel and the funding transaction will be returned.
  2236  func (f *Manager) waitForFundingWithTimeout(
  2237  	ch *channeldb.OpenChannel) (*confirmedChannel, error) {
  2238  
  2239  	confChan := make(chan *confirmedChannel)
  2240  	timeoutChan := make(chan error, 1)
  2241  	cancelChan := make(chan struct{})
  2242  
  2243  	f.wg.Add(1)
  2244  	go f.waitForFundingConfirmation(ch, cancelChan, confChan)
  2245  
  2246  	// If we are not the initiator, we have no money at stake and will
  2247  	// timeout waiting for the funding transaction to confirm after a
  2248  	// while.
  2249  	if !ch.IsInitiator {
  2250  		f.wg.Add(1)
  2251  		go f.waitForTimeout(ch, cancelChan, timeoutChan)
  2252  	}
  2253  	defer close(cancelChan)
  2254  
  2255  	select {
  2256  	case err := <-timeoutChan:
  2257  		if err != nil {
  2258  			return nil, err
  2259  		}
  2260  		return nil, ErrConfirmationTimeout
  2261  
  2262  	case <-f.quit:
  2263  		// The fundingManager is shutting down, and will resume wait on
  2264  		// startup.
  2265  		return nil, ErrFundingManagerShuttingDown
  2266  
  2267  	case confirmedChannel, ok := <-confChan:
  2268  		if !ok {
  2269  			return nil, fmt.Errorf("waiting for funding" +
  2270  				"confirmation failed")
  2271  		}
  2272  		return confirmedChannel, nil
  2273  	}
  2274  }
  2275  
  2276  // makeFundingScript re-creates the funding script for the funding transaction
  2277  // of the target channel.
  2278  func makeFundingScript(channel *channeldb.OpenChannel) ([]byte, error) {
  2279  	localKey := channel.LocalChanCfg.MultiSigKey.PubKey.SerializeCompressed()
  2280  	remoteKey := channel.RemoteChanCfg.MultiSigKey.PubKey.SerializeCompressed()
  2281  
  2282  	multiSigScript, err := input.GenMultiSigScript(localKey, remoteKey)
  2283  	if err != nil {
  2284  		return nil, err
  2285  	}
  2286  
  2287  	return input.ScriptHashPkScript(multiSigScript)
  2288  }
  2289  
// waitForFundingConfirmation handles the final stages of the channel funding
// process once the funding transaction has been broadcast. The primary
// function of waitForFundingConfirmation is to wait for blockchain
// confirmation, and then to notify the other systems that must be notified
// when a channel has become active for lightning transactions.
// The wait can be canceled by closing the cancelChan. In case of success, a
// *confirmedChannel carrying the short channel ID and the confirmed funding
// transaction is sent on confChan. confChan is closed on return in all cases,
// so receivers can detect failure via the closed-without-send case.
//
// NOTE: This MUST be run as a goroutine.
func (f *Manager) waitForFundingConfirmation(
	completeChan *channeldb.OpenChannel, cancelChan <-chan struct{},
	confChan chan<- *confirmedChannel) {

	defer f.wg.Done()
	// Closing confChan signals failure to the receiver unless a
	// confirmedChannel was sent first.
	defer close(confChan)

	// Register with the ChainNotifier for a notification once the funding
	// transaction reaches `numConfs` confirmations.
	txid := completeChan.FundingOutpoint.Hash
	fundingScript, err := makeFundingScript(completeChan)
	if err != nil {
		log.Errorf("unable to create funding script for "+
			"ChannelPoint(%v): %v", completeChan.FundingOutpoint,
			err)
		return
	}
	numConfs := uint32(completeChan.NumConfsRequired)
	confNtfn, err := f.cfg.Notifier.RegisterConfirmationsNtfn(
		&txid, fundingScript, numConfs,
		completeChan.FundingBroadcastHeight,
	)
	if err != nil {
		log.Errorf("Unable to register for confirmation of "+
			"ChannelPoint(%v): %v", completeChan.FundingOutpoint,
			err)
		return
	}

	log.Infof("Waiting for funding tx (%v) to reach %v confirmations",
		txid, numConfs)

	var confDetails *chainntnfs.TxConfirmation
	var ok bool

	// Wait until the specified number of confirmations has been reached,
	// we get a cancel signal, or the wallet signals a shutdown.
	select {
	case confDetails, ok = <-confNtfn.Confirmed:
		// fallthrough

	case <-cancelChan:
		log.Warnf("canceled waiting for funding confirmation, "+
			"stopping funding flow for ChannelPoint(%v)",
			completeChan.FundingOutpoint)
		return

	case <-f.quit:
		log.Warnf("fundingManager shutting down, stopping funding "+
			"flow for ChannelPoint(%v)",
			completeChan.FundingOutpoint)
		return
	}

	// ok is false when the Confirmed channel was closed, meaning the
	// notifier itself is shutting down.
	if !ok {
		log.Warnf("ChainNotifier shutting down, cannot complete "+
			"funding flow for ChannelPoint(%v)",
			completeChan.FundingOutpoint)
		return
	}

	fundingPoint := completeChan.FundingOutpoint
	log.Infof("ChannelPoint(%v) is now active: ChannelID(%v)",
		fundingPoint, lnwire.NewChanIDFromOutPoint(&fundingPoint))

	// With the block height and the transaction index known, we can
	// construct the compact chanID which is used on the network to
	// uniquely identify channels.
	shortChanID := lnwire.ShortChannelID{
		BlockHeight: confDetails.BlockHeight,
		TxIndex:     confDetails.TxIndex,
		TxPosition:  uint16(fundingPoint.Index),
	}

	// Hand the result to the caller, unless the manager is shutting down.
	select {
	case confChan <- &confirmedChannel{
		shortChanID: shortChanID,
		fundingTx:   confDetails.Tx,
	}:
	case <-f.quit:
		return
	}
}
  2382  
  2383  // waitForTimeout will close the timeout channel if maxWaitNumBlocksFundingConf
  2384  // has passed from the broadcast height of the given channel. In case of error,
  2385  // the error is sent on timeoutChan. The wait can be canceled by closing the
  2386  // cancelChan.
  2387  //
  2388  // NOTE: timeoutChan MUST be buffered.
  2389  // NOTE: This MUST be run as a goroutine.
  2390  func (f *Manager) waitForTimeout(completeChan *channeldb.OpenChannel,
  2391  	cancelChan <-chan struct{}, timeoutChan chan<- error) {
  2392  	defer f.wg.Done()
  2393  
  2394  	epochClient, err := f.cfg.Notifier.RegisterBlockEpochNtfn(nil)
  2395  	if err != nil {
  2396  		timeoutChan <- fmt.Errorf("unable to register for epoch "+
  2397  			"notification: %v", err)
  2398  		return
  2399  	}
  2400  
  2401  	defer epochClient.Cancel()
  2402  
  2403  	// On block maxHeight we will cancel the funding confirmation wait.
  2404  	maxHeight := completeChan.FundingBroadcastHeight + maxWaitNumBlocksFundingConf
  2405  	for {
  2406  		select {
  2407  		case epoch, ok := <-epochClient.Epochs:
  2408  			if !ok {
  2409  				timeoutChan <- fmt.Errorf("epoch client " +
  2410  					"shutting down")
  2411  				return
  2412  			}
  2413  
  2414  			// Close the timeout channel and exit if the block is
  2415  			// aboce the max height.
  2416  			if uint32(epoch.Height) >= maxHeight {
  2417  				log.Warnf("Waited for %v blocks without "+
  2418  					"seeing funding transaction confirmed,"+
  2419  					" cancelling.",
  2420  					maxWaitNumBlocksFundingConf)
  2421  
  2422  				// Notify the caller of the timeout.
  2423  				close(timeoutChan)
  2424  				return
  2425  			}
  2426  
  2427  			// TODO: If we are the channel initiator implement
  2428  			// a method for recovering the funds from the funding
  2429  			// transaction
  2430  
  2431  		case <-cancelChan:
  2432  			return
  2433  
  2434  		case <-f.quit:
  2435  			// The fundingManager is shutting down, will resume
  2436  			// waiting for the funding transaction on startup.
  2437  			return
  2438  		}
  2439  	}
  2440  }
  2441  
// handleFundingConfirmation marks a channel as open in the database, and sets
// the channelOpeningState to markedOpen. In addition it will report the now
// decided short channel ID to the switch, and close the local discovery signal
// for this channel.
func (f *Manager) handleFundingConfirmation(
	completeChan *channeldb.OpenChannel,
	confChannel *confirmedChannel) error {

	fundingPoint := completeChan.FundingOutpoint
	chanID := lnwire.NewChanIDFromOutPoint(&fundingPoint)

	// TODO(roasbeef): ideally persistent state update for chan above
	// should be abstracted

	// Now that the channel has been fully confirmed, we'll request
	// that the wallet fully verify this channel to ensure that it can be
	// used.
	err := f.cfg.Wallet.ValidateChannel(completeChan, confChannel.fundingTx)
	if err != nil {
		// TODO(roasbeef): delete chan state?
		return fmt.Errorf("unable to validate channel: %v", err)
	}

	// The funding transaction now being confirmed, we add this channel to
	// the fundingManager's internal persistent state machine that we use
	// to track the remaining process of the channel opening. This is
	// useful to resume the opening process in case of restarts. We set the
	// opening state before we mark the channel opened in the database,
	// such that we can recover from one of the db writes failing.
	err = f.saveChannelOpeningState(
		&fundingPoint, markedOpen, &confChannel.shortChanID,
	)
	if err != nil {
		return fmt.Errorf("error setting channel state to markedOpen: %v",
			err)
	}

	// Now that the channel has been fully confirmed and we successfully
	// saved the opening state, we'll mark it as open within the database.
	err = completeChan.MarkAsOpen(confChannel.shortChanID)
	if err != nil {
		return fmt.Errorf("error setting channel pending flag to false: "+
			"%v", err)
	}

	// Inform the ChannelNotifier that the channel has transitioned from
	// pending open to open.
	f.cfg.NotifyOpenChannelEvent(completeChan.FundingOutpoint)

	// As there might already be an active link in the switch with an
	// outdated short chan ID, we'll instruct the switch to load the updated
	// short chan id from disk.
	err = f.cfg.ReportShortChanID(fundingPoint)
	if err != nil {
		// Non-fatal: we only log the failure and carry on.
		log.Errorf("unable to report short chan id: %v", err)
	}

	// If we opened the channel, and lnd's wallet published our funding tx
	// (which is not the case for some channels) then we update our
	// transaction label with our short channel ID, which is known now that
	// our funding transaction has confirmed. We do not label transactions
	// we did not publish, because our wallet has no knowledge of them.
	if completeChan.IsInitiator && completeChan.ChanType.HasFundingTx() {
		shortChanID := completeChan.ShortChanID()
		label := labels.MakeLabel(
			labels.LabelTypeChannelOpen, &shortChanID,
		)

		err = f.cfg.UpdateLabel(
			completeChan.FundingOutpoint.Hash, label,
		)
		if err != nil {
			// Non-fatal: labeling is best-effort only.
			log.Errorf("unable to update label: %v", err)
		}
	}

	// Close the discoverySignal channel, indicating to a separate
	// goroutine that the channel now is marked as open in the database
	// and that it is acceptable to process funding locked messages
	// from the peer.
	f.localDiscoveryMtx.Lock()
	if discoverySignal, ok := f.localDiscoverySignals[chanID]; ok {
		close(discoverySignal)
	}
	f.localDiscoveryMtx.Unlock()

	return nil
}
  2530  
// sendFundingLocked creates and sends the fundingLocked message.
// This should be called after the funding transaction has been confirmed,
// and the channelState is 'markedOpen'. It blocks until the message has been
// sent successfully or the fundingManager is shut down.
func (f *Manager) sendFundingLocked(
	completeChan *channeldb.OpenChannel, channel *lnwallet.LightningChannel,
	shortChanID *lnwire.ShortChannelID) error {

	chanID := lnwire.NewChanIDFromOutPoint(&completeChan.FundingOutpoint)

	var peerKey [33]byte
	copy(peerKey[:], completeChan.IdentityPub.SerializeCompressed())

	// Next, we'll send over the funding locked message which marks that we
	// consider the channel open by presenting the remote party with our
	// next revocation key. Without the revocation key, the remote party
	// will be unable to propose state transitions.
	nextRevocation, err := channel.NextRevocationKey()
	if err != nil {
		return fmt.Errorf("unable to create next revocation: %v", err)
	}
	fundingLockedMsg := lnwire.NewFundingLocked(chanID, nextRevocation)

	// If the peer has disconnected before we reach this point, we will need
	// to wait for it to come back online before sending the fundingLocked
	// message. This is special for fundingLocked, since failing to send any
	// of the previous messages in the funding flow just cancels the flow.
	// But now the funding transaction is confirmed, the channel is open
	// and we have to make sure the peer gets the fundingLocked message when
	// it comes back online. This is also crucial during restart of lnd,
	// where we might try to resend the fundingLocked message before the
	// server has had the time to connect to the peer. We keep trying to
	// send fundingLocked until we succeed, or the fundingManager is shut
	// down.
	for {
		// Register for a notification that fires as soon as the peer
		// is online (immediately if it already is connected).
		connected := make(chan lnpeer.Peer, 1)
		f.cfg.NotifyWhenOnline(peerKey, connected)

		var peer lnpeer.Peer
		select {
		case peer = <-connected:
		case <-f.quit:
			return ErrFundingManagerShuttingDown
		}

		log.Infof("Peer(%x) is online, sending FundingLocked "+
			"for ChannelID(%v)", peerKey, chanID)

		if err := peer.SendMessage(true, fundingLockedMsg); err == nil {
			// Sending succeeded, we can break out and continue the
			// funding flow.
			break
		}

		log.Warnf("Unable to send fundingLocked to peer %x: %v. "+
			"Will retry when online", peerKey, err)
	}

	return nil
}
  2590  
// addToRouterGraph sends a ChannelAnnouncement and a ChannelUpdate to the
// gossiper so that the channel is added to the Router's internal graph.
// These announcement messages are NOT broadcasted to the greater network,
// only to the channel counter party. The proofs required to announce the
// channel to the greater network will be created and sent in annAfterSixConfs.
func (f *Manager) addToRouterGraph(completeChan *channeldb.OpenChannel,
	shortChanID *lnwire.ShortChannelID) error {

	chanID := lnwire.NewChanIDFromOutPoint(&completeChan.FundingOutpoint)

	// We'll obtain the min HTLC value we can forward in our direction, as
	// we'll use this value within our ChannelUpdate. This constraint is
	// originally set by the remote node, as it will be the one that will
	// need to determine the smallest HTLC it deems economically relevant.
	fwdMinHTLC := completeChan.LocalChanCfg.MinHTLC

	// We don't necessarily want to go as low as the remote party
	// allows. Check it against our default forwarding policy.
	if fwdMinHTLC < f.cfg.DefaultRoutingPolicy.MinHTLCOut {
		fwdMinHTLC = f.cfg.DefaultRoutingPolicy.MinHTLCOut
	}

	// We'll obtain the max HTLC value we can forward in our direction, as
	// we'll use this value within our ChannelUpdate. This value must be <=
	// channel capacity and <= the maximum in-flight msats set by the peer.
	fwdMaxHTLC := completeChan.LocalChanCfg.MaxPendingAmount
	capacityMAtoms := lnwire.NewMAtomsFromAtoms(completeChan.Capacity)
	if fwdMaxHTLC > capacityMAtoms {
		fwdMaxHTLC = capacityMAtoms
	}

	ann, err := f.newChanAnnouncement(
		f.cfg.IDKey, completeChan.IdentityPub,
		&completeChan.LocalChanCfg.MultiSigKey,
		completeChan.RemoteChanCfg.MultiSigKey.PubKey, *shortChanID,
		chanID, fwdMinHTLC, fwdMaxHTLC,
	)
	if err != nil {
		return fmt.Errorf("error generating channel "+
			"announcement: %v", err)
	}

	// Send ChannelAnnouncement and ChannelUpdate to the gossiper to add
	// to the Router's topology.
	errChan := f.cfg.SendAnnouncement(
		ann.chanAnn, discovery.ChannelCapacity(completeChan.Capacity),
		discovery.ChannelPoint(completeChan.FundingOutpoint),
	)
	select {
	case err := <-errChan:
		if err != nil {
			// Outdated/ignored errors are benign: the router
			// already knows of this announcement.
			if routing.IsError(err, routing.ErrOutdated,
				routing.ErrIgnored) {
				log.Debugf("Router rejected "+
					"ChannelAnnouncement: %v", err)
			} else {
				return fmt.Errorf("error sending channel "+
					"announcement: %v", err)
			}
		}
	case <-f.quit:
		return ErrFundingManagerShuttingDown
	}

	errChan = f.cfg.SendAnnouncement(ann.chanUpdateAnn)
	select {
	case err := <-errChan:
		if err != nil {
			// As above, outdated/ignored errors are non-fatal.
			if routing.IsError(err, routing.ErrOutdated,
				routing.ErrIgnored) {
				log.Debugf("Router rejected "+
					"ChannelUpdate: %v", err)
			} else {
				return fmt.Errorf("error sending channel "+
					"update: %v", err)
			}
		}
	case <-f.quit:
		return ErrFundingManagerShuttingDown
	}

	return nil
}
  2674  
// annAfterSixConfs broadcasts the necessary channel announcement messages to
// the network after 6 confs. Should be called after the fundingLocked message
// is sent and the channel is added to the router graph (channelState is
// 'addedToRouterGraph') and the channel is ready to be used. This is the last
// step in the channel opening process, and the opening state will be deleted
// from the database if successful.
func (f *Manager) annAfterSixConfs(completeChan *channeldb.OpenChannel,
	shortChanID *lnwire.ShortChannelID) error {

	// If this channel is not meant to be announced to the greater network,
	// we'll only send our NodeAnnouncement to our counterparty to ensure we
	// don't leak any of our information.
	announceChan := completeChan.ChannelFlags&lnwire.FFAnnounceChannel != 0
	if !announceChan {
		log.Debugf("Will not announce private channel %s.",
			shortChanID)

		// Wait for the counterparty to come back online before
		// attempting to send it our node announcement.
		peerChan := make(chan lnpeer.Peer, 1)

		var peerKey [33]byte
		copy(peerKey[:], completeChan.IdentityPub.SerializeCompressed())

		f.cfg.NotifyWhenOnline(peerKey, peerChan)

		var peer lnpeer.Peer
		select {
		case peer = <-peerChan:
		case <-f.quit:
			return ErrFundingManagerShuttingDown
		}

		nodeAnn, err := f.cfg.CurrentNodeAnnouncement()
		if err != nil {
			return fmt.Errorf("unable to retrieve current node "+
				"announcement: %v", err)
		}

		chanID := lnwire.NewChanIDFromOutPoint(
			&completeChan.FundingOutpoint,
		)
		pubKey := peer.PubKey()
		log.Debugf("Sending our NodeAnnouncement for "+
			"ChannelID(%v) to %x", chanID, pubKey)

		// TODO(halseth): make reliable. If the peer is not online this
		// will fail, and the opening process will stop. Should instead
		// block here, waiting for the peer to come online.
		if err := peer.SendMessage(true, &nodeAnn); err != nil {
			return fmt.Errorf("unable to send node announcement "+
				"to peer %x: %v", pubKey, err)
		}
	} else {
		// Otherwise, we'll wait until the funding transaction has
		// reached 6 confirmations before announcing it.
		numConfs := uint32(completeChan.NumConfsRequired)
		if numConfs < 6 {
			numConfs = 6
		}
		txid := completeChan.FundingOutpoint.Hash
		log.Debugf("Will announce channel %s after ChannelPoint"+
			"(%v) has gotten %d confirmations",
			shortChanID, completeChan.FundingOutpoint,
			numConfs)

		fundingScript, err := makeFundingScript(completeChan)
		if err != nil {
			return fmt.Errorf("unable to create funding script for "+
				"ChannelPoint(%v): %v",
				completeChan.FundingOutpoint, err)
		}

		// Register with the ChainNotifier for a notification once the
		// funding transaction reaches at least 6 confirmations.
		confNtfn, err := f.cfg.Notifier.RegisterConfirmationsNtfn(
			&txid, fundingScript, numConfs,
			completeChan.FundingBroadcastHeight,
		)
		if err != nil {
			return fmt.Errorf("unable to register for "+
				"confirmation of ChannelPoint(%v): %v",
				completeChan.FundingOutpoint, err)
		}

		// Wait until 6 confirmations have been reached or the wallet
		// signals a shutdown.
		select {
		case _, ok := <-confNtfn.Confirmed:
			// ok is false when the notifier itself shut down.
			if !ok {
				return fmt.Errorf("ChainNotifier shutting "+
					"down, cannot complete funding flow "+
					"for ChannelPoint(%v)",
					completeChan.FundingOutpoint)
			}
			// Fallthrough.

		case <-f.quit:
			return fmt.Errorf("%v, stopping funding flow for "+
				"ChannelPoint(%v)",
				ErrFundingManagerShuttingDown,
				completeChan.FundingOutpoint)
		}

		fundingPoint := completeChan.FundingOutpoint
		chanID := lnwire.NewChanIDFromOutPoint(&fundingPoint)

		log.Infof("Announcing ChannelPoint(%v), short_chan_id=%s",
			&fundingPoint, shortChanID)

		// Create and broadcast the proofs required to make this
		// channel public and usable for other nodes for routing.
		err = f.announceChannel(
			f.cfg.IDKey, completeChan.IdentityPub,
			&completeChan.LocalChanCfg.MultiSigKey,
			completeChan.RemoteChanCfg.MultiSigKey.PubKey,
			*shortChanID, chanID,
		)
		if err != nil {
			return fmt.Errorf("channel announcement failed: %v", err)
		}

		log.Debugf("Channel with ChannelPoint(%v), short_chan_id=%s "+
			"sent to gossiper", &fundingPoint, shortChanID)
	}

	return nil
}
  2801  
// handleFundingLocked finalizes the channel funding process and enables the
// channel to enter normal operating mode.
func (f *Manager) handleFundingLocked(peer lnpeer.Peer,
	msg *lnwire.FundingLocked) {

	defer f.wg.Done()
	log.Debugf("Received FundingLocked for ChannelID(%v) from "+
		"peer %x", msg.ChanID,
		peer.IdentityKey().SerializeCompressed())

	// If we are currently in the process of handling a funding locked
	// message for this channel, ignore.
	f.handleFundingLockedMtx.Lock()
	_, ok := f.handleFundingLockedBarriers[msg.ChanID]
	if ok {
		log.Infof("Already handling fundingLocked for "+
			"ChannelID(%v), ignoring.", msg.ChanID)
		f.handleFundingLockedMtx.Unlock()
		return
	}

	// If not already handling fundingLocked for this channel, set up
	// barrier, and move on.
	f.handleFundingLockedBarriers[msg.ChanID] = struct{}{}
	f.handleFundingLockedMtx.Unlock()

	// Remove the barrier on exit so a later fundingLocked for the same
	// channel can be processed again.
	defer func() {
		f.handleFundingLockedMtx.Lock()
		delete(f.handleFundingLockedBarriers, msg.ChanID)
		f.handleFundingLockedMtx.Unlock()
	}()

	f.localDiscoveryMtx.Lock()
	localDiscoverySignal, ok := f.localDiscoverySignals[msg.ChanID]
	f.localDiscoveryMtx.Unlock()

	if ok {
		// Before we proceed with processing the funding locked
		// message, we'll wait for the local waitForFundingConfirmation
		// goroutine to signal that it has the necessary state in
		// place. Otherwise, we may be missing critical information
		// required to handle forwarded HTLC's.
		select {
		case <-localDiscoverySignal:
			// Fallthrough
		case <-f.quit:
			return
		}

		// With the signal received, we can now safely delete the entry
		// from the map.
		f.localDiscoveryMtx.Lock()
		delete(f.localDiscoverySignals, msg.ChanID)
		f.localDiscoveryMtx.Unlock()
	}

	// First, we'll attempt to locate the channel whose funding workflow is
	// being finalized by this message. We go to the database rather than
	// our reservation map as we may have restarted, mid funding flow.
	chanID := msg.ChanID
	channel, err := f.cfg.FindChannel(chanID)
	if err != nil {
		log.Errorf("Unable to locate ChannelID(%v), cannot complete "+
			"funding", chanID)
		return
	}

	// If the RemoteNextRevocation is non-nil, it means that we have
	// already processed fundingLocked for this channel, so ignore.
	if channel.RemoteNextRevocation != nil {
		log.Infof("Received duplicate fundingLocked for "+
			"ChannelID(%v), ignoring.", chanID)
		return
	}

	// The funding locked message contains the next commitment point we'll
	// need to create the next commitment state for the remote party. So
	// we'll insert that into the channel now before passing it along to
	// other sub-systems.
	err = channel.InsertNextRevocation(msg.NextPerCommitmentPoint)
	if err != nil {
		log.Errorf("unable to insert next commitment point: %v", err)
		return
	}

	// Launch a defer so we _ensure_ that the channel barrier is properly
	// closed even if the target peer is no longer online at this point.
	defer func() {
		// Close the active channel barrier signaling the readHandler
		// that commitment related modifications to this channel can
		// now proceed.
		f.barrierMtx.Lock()
		chanBarrier, ok := f.newChanBarriers[chanID]
		if ok {
			log.Tracef("Closing chan barrier for ChanID(%v)",
				chanID)
			close(chanBarrier)
			delete(f.newChanBarriers, chanID)
		}
		f.barrierMtx.Unlock()
	}()

	if err := peer.AddNewChannel(channel, f.quit); err != nil {
		log.Errorf("Unable to add new channel %v with peer %x: %v",
			channel.FundingOutpoint,
			peer.IdentityKey().SerializeCompressed(), err,
		)
	}
}
  2911  
// chanAnnouncement encapsulates the two authenticated announcements that we
// send out to the network after a new channel has been created locally.
type chanAnnouncement struct {
	// chanAnn authenticates the existence of the channel itself.
	chanAnn *lnwire.ChannelAnnouncement

	// chanUpdateAnn carries our directional routing policy for the
	// channel.
	chanUpdateAnn *lnwire.ChannelUpdate

	// chanProof contains the signatures required to announce the channel
	// to the greater network.
	chanProof *lnwire.AnnounceSignatures
}
  2919  
  2920  // newChanAnnouncement creates the authenticated channel announcement messages
  2921  // required to broadcast a newly created channel to the network. The
  2922  // announcement is two part: the first part authenticates the existence of the
  2923  // channel and contains four signatures binding the funding pub keys and
  2924  // identity pub keys of both parties to the channel, and the second segment is
  2925  // authenticated only by us and contains our directional routing policy for the
  2926  // channel.
  2927  func (f *Manager) newChanAnnouncement(localPubKey,
  2928  	remotePubKey *secp256k1.PublicKey, localFundingKey *keychain.KeyDescriptor,
  2929  	remoteFundingKey *secp256k1.PublicKey, shortChanID lnwire.ShortChannelID,
  2930  	chanID lnwire.ChannelID, fwdMinHTLC,
  2931  	fwdMaxHTLC lnwire.MilliAtom) (*chanAnnouncement, error) {
  2932  
  2933  	chainHash := f.cfg.Wallet.Cfg.NetParams.GenesisHash
  2934  
  2935  	// The unconditional section of the announcement is the ShortChannelID
  2936  	// itself which compactly encodes the location of the funding output
  2937  	// within the blockchain.
  2938  	chanAnn := &lnwire.ChannelAnnouncement{
  2939  		ShortChannelID: shortChanID,
  2940  		Features:       lnwire.NewRawFeatureVector(),
  2941  		ChainHash:      chainHash,
  2942  	}
  2943  
  2944  	// The chanFlags field indicates which directed edge of the channel is
  2945  	// being updated within the ChannelUpdateAnnouncement announcement
  2946  	// below. A value of zero means it's the edge of the "first" node and 1
  2947  	// being the other node.
  2948  	var chanFlags lnwire.ChanUpdateChanFlags
  2949  
  2950  	// The lexicographical ordering of the two identity public keys of the
  2951  	// nodes indicates which of the nodes is "first". If our serialized
  2952  	// identity key is lower than theirs then we're the "first" node and
  2953  	// second otherwise.
  2954  	selfBytes := localPubKey.SerializeCompressed()
  2955  	remoteBytes := remotePubKey.SerializeCompressed()
  2956  	if bytes.Compare(selfBytes, remoteBytes) == -1 {
  2957  		copy(chanAnn.NodeID1[:], localPubKey.SerializeCompressed())
  2958  		copy(chanAnn.NodeID2[:], remotePubKey.SerializeCompressed())
  2959  		copy(chanAnn.DecredKey1[:], localFundingKey.PubKey.SerializeCompressed())
  2960  		copy(chanAnn.DecredKey2[:], remoteFundingKey.SerializeCompressed())
  2961  
  2962  		// If we're the first node then update the chanFlags to
  2963  		// indicate the "direction" of the update.
  2964  		chanFlags = 0
  2965  	} else {
  2966  		copy(chanAnn.NodeID1[:], remotePubKey.SerializeCompressed())
  2967  		copy(chanAnn.NodeID2[:], localPubKey.SerializeCompressed())
  2968  		copy(chanAnn.DecredKey1[:], remoteFundingKey.SerializeCompressed())
  2969  		copy(chanAnn.DecredKey2[:], localFundingKey.PubKey.SerializeCompressed())
  2970  
  2971  		// If we're the second node then update the chanFlags to
  2972  		// indicate the "direction" of the update.
  2973  		chanFlags = 1
  2974  	}
  2975  
  2976  	// Our channel update message flags will signal that we support the
  2977  	// max_htlc field.
  2978  	msgFlags := lnwire.ChanUpdateOptionMaxHtlc
  2979  
  2980  	// We announce the channel with the default values. Some of
  2981  	// these values can later be changed by crafting a new ChannelUpdate.
  2982  	chanUpdateAnn := &lnwire.ChannelUpdate{
  2983  		ShortChannelID: shortChanID,
  2984  		ChainHash:      chainHash,
  2985  		Timestamp:      uint32(time.Now().Unix()),
  2986  		MessageFlags:   msgFlags,
  2987  		ChannelFlags:   chanFlags,
  2988  		TimeLockDelta:  uint16(f.cfg.DefaultRoutingPolicy.TimeLockDelta),
  2989  
  2990  		// We use the HtlcMinimumMAtoms that the remote party required us
  2991  		// to use, as our ChannelUpdate will be used to carry HTLCs
  2992  		// towards them.
  2993  		HtlcMinimumMAtoms: fwdMinHTLC,
  2994  		HtlcMaximumMAtoms: fwdMaxHTLC,
  2995  
  2996  		BaseFee: uint32(f.cfg.DefaultRoutingPolicy.BaseFee),
  2997  		FeeRate: uint32(f.cfg.DefaultRoutingPolicy.FeeRate),
  2998  	}
  2999  
  3000  	// With the channel update announcement constructed, we'll generate a
  3001  	// signature that signs a double-sha digest of the announcement.
  3002  	// This'll serve to authenticate this announcement and any other future
  3003  	// updates we may send.
  3004  	chanUpdateMsg, err := chanUpdateAnn.DataToSign()
  3005  	if err != nil {
  3006  		return nil, err
  3007  	}
  3008  	sig, err := f.cfg.SignMessage(f.cfg.IDKeyLoc, chanUpdateMsg, false)
  3009  	if err != nil {
  3010  		return nil, errors.Errorf("unable to generate channel "+
  3011  			"update announcement signature: %v", err)
  3012  	}
  3013  	chanUpdateAnn.Signature, err = lnwire.NewSigFromSignature(sig)
  3014  	if err != nil {
  3015  		return nil, errors.Errorf("unable to generate channel "+
  3016  			"update announcement signature: %v", err)
  3017  	}
  3018  
  3019  	// The channel existence proofs itself is currently announced in
  3020  	// distinct message. In order to properly authenticate this message, we
  3021  	// need two signatures: one under the identity public key used which
  3022  	// signs the message itself and another signature of the identity
  3023  	// public key under the funding key itself.
  3024  	//
  3025  	// TODO(roasbeef): use SignAnnouncement here instead?
  3026  	chanAnnMsg, err := chanAnn.DataToSign()
  3027  	if err != nil {
  3028  		return nil, err
  3029  	}
  3030  	nodeSig, err := f.cfg.SignMessage(f.cfg.IDKeyLoc, chanAnnMsg, false)
  3031  	if err != nil {
  3032  		return nil, errors.Errorf("unable to generate node "+
  3033  			"signature for channel announcement: %v", err)
  3034  	}
  3035  	decredSig, err := f.cfg.SignMessage(
  3036  		localFundingKey.KeyLocator, chanAnnMsg, false,
  3037  	)
  3038  	if err != nil {
  3039  		return nil, errors.Errorf("unable to generate decred "+
  3040  			"signature for node public key: %v", err)
  3041  	}
  3042  
  3043  	// Finally, we'll generate the announcement proof which we'll use to
  3044  	// provide the other side with the necessary signatures required to
  3045  	// allow them to reconstruct the full channel announcement.
  3046  	proof := &lnwire.AnnounceSignatures{
  3047  		ChannelID:      chanID,
  3048  		ShortChannelID: shortChanID,
  3049  	}
  3050  	proof.NodeSignature, err = lnwire.NewSigFromSignature(nodeSig)
  3051  	if err != nil {
  3052  		return nil, err
  3053  	}
  3054  	proof.DecredSignature, err = lnwire.NewSigFromSignature(decredSig)
  3055  	if err != nil {
  3056  		return nil, err
  3057  	}
  3058  
  3059  	return &chanAnnouncement{
  3060  		chanAnn:       chanAnn,
  3061  		chanUpdateAnn: chanUpdateAnn,
  3062  		chanProof:     proof,
  3063  	}, nil
  3064  }
  3065  
  3066  // announceChannel announces a newly created channel to the rest of the network
  3067  // by crafting the two authenticated announcements required for the peers on
  3068  // the network to recognize the legitimacy of the channel. The crafted
  3069  // announcements are then sent to the channel router to handle broadcasting to
  3070  // the network during its next trickle.
  3071  // This method is synchronous and will return when all the network requests
  3072  // finish, either successfully or with an error.
  3073  func (f *Manager) announceChannel(localIDKey, remoteIDKey *secp256k1.PublicKey,
  3074  	localFundingKey *keychain.KeyDescriptor,
  3075  	remoteFundingKey *secp256k1.PublicKey, shortChanID lnwire.ShortChannelID,
  3076  	chanID lnwire.ChannelID) error {
  3077  
  3078  	// First, we'll create the batch of announcements to be sent upon
  3079  	// initial channel creation. This includes the channel announcement
  3080  	// itself, the channel update announcement, and our half of the channel
  3081  	// proof needed to fully authenticate the channel.
  3082  	//
  3083  	// We can pass in zeroes for the min and max htlc policy, because we
  3084  	// only use the channel announcement message from the returned struct.
  3085  	ann, err := f.newChanAnnouncement(localIDKey, remoteIDKey,
  3086  		localFundingKey, remoteFundingKey, shortChanID, chanID,
  3087  		0, 0,
  3088  	)
  3089  	if err != nil {
  3090  		log.Errorf("can't generate channel announcement: %v", err)
  3091  		return err
  3092  	}
  3093  
  3094  	// We only send the channel proof announcement and the node announcement
  3095  	// because addToRouterGraph previously sent the ChannelAnnouncement and
  3096  	// the ChannelUpdate announcement messages. The channel proof and node
  3097  	// announcements are broadcast to the greater network.
  3098  	errChan := f.cfg.SendAnnouncement(ann.chanProof)
  3099  	select {
  3100  	case err := <-errChan:
  3101  		if err != nil {
  3102  			if routing.IsError(err, routing.ErrOutdated,
  3103  				routing.ErrIgnored) {
  3104  				log.Debugf("Router rejected "+
  3105  					"AnnounceSignatures: %v", err)
  3106  			} else {
  3107  				log.Errorf("Unable to send channel "+
  3108  					"proof: %v", err)
  3109  				return err
  3110  			}
  3111  		}
  3112  
  3113  	case <-f.quit:
  3114  		return ErrFundingManagerShuttingDown
  3115  	}
  3116  
  3117  	// Now that the channel is announced to the network, we will also
  3118  	// obtain and send a node announcement. This is done since a node
  3119  	// announcement is only accepted after a channel is known for that
  3120  	// particular node, and this might be our first channel.
  3121  	nodeAnn, err := f.cfg.CurrentNodeAnnouncement()
  3122  	if err != nil {
  3123  		log.Errorf("can't generate node announcement: %v", err)
  3124  		return err
  3125  	}
  3126  
  3127  	errChan = f.cfg.SendAnnouncement(&nodeAnn)
  3128  	select {
  3129  	case err := <-errChan:
  3130  		if err != nil {
  3131  			if routing.IsError(err, routing.ErrOutdated,
  3132  				routing.ErrIgnored) {
  3133  				log.Debugf("Router rejected "+
  3134  					"NodeAnnouncement: %v", err)
  3135  			} else {
  3136  				log.Errorf("Unable to send node "+
  3137  					"announcement: %v", err)
  3138  				return err
  3139  			}
  3140  		}
  3141  
  3142  	case <-f.quit:
  3143  		return ErrFundingManagerShuttingDown
  3144  	}
  3145  
  3146  	return nil
  3147  }
  3148  
// InitFundingWorkflow sends a message to the funding manager instructing it
// to initiate a single funder workflow with the source peer.
//
// The message is delivered over the manager's fundingRequests channel, so
// this call blocks until the funding manager's main loop receives it.
// TODO(roasbeef): re-visit blocking nature..
func (f *Manager) InitFundingWorkflow(msg *InitFundingMsg) {
	f.fundingRequests <- msg
}
  3155  
  3156  // getUpfrontShutdownScript takes a user provided script and a getScript
  3157  // function which can be used to generate an upfront shutdown script. If our
  3158  // peer does not support the feature, this function will error if a non-zero
  3159  // script was provided by the user, and return an empty script otherwise. If
  3160  // our peer does support the feature, we will return the user provided script
  3161  // if non-zero, or a freshly generated script if our node is configured to set
  3162  // upfront shutdown scripts automatically.
  3163  func getUpfrontShutdownScript(enableUpfrontShutdown bool, peer lnpeer.Peer,
  3164  	script lnwire.DeliveryAddress,
  3165  	getScript func() (lnwire.DeliveryAddress, error)) (lnwire.DeliveryAddress,
  3166  	error) {
  3167  
  3168  	// Check whether the remote peer supports upfront shutdown scripts.
  3169  	remoteUpfrontShutdown := peer.RemoteFeatures().HasFeature(
  3170  		lnwire.UpfrontShutdownScriptOptional,
  3171  	)
  3172  
  3173  	// If the peer does not support upfront shutdown scripts, and one has been
  3174  	// provided, return an error because the feature is not supported.
  3175  	if !remoteUpfrontShutdown && len(script) != 0 {
  3176  		return nil, errUpfrontShutdownScriptNotSupported
  3177  	}
  3178  
  3179  	// If the peer does not support upfront shutdown, return an empty address.
  3180  	if !remoteUpfrontShutdown {
  3181  		return nil, nil
  3182  	}
  3183  
  3184  	// If the user has provided an script and the peer supports the feature,
  3185  	// return it. Note that user set scripts override the enable upfront
  3186  	// shutdown flag.
  3187  	if len(script) > 0 {
  3188  		return script, nil
  3189  	}
  3190  
  3191  	// If we do not have setting of upfront shutdown script enabled, return
  3192  	// an empty script.
  3193  	if !enableUpfrontShutdown {
  3194  		return nil, nil
  3195  	}
  3196  
  3197  	return getScript()
  3198  }
  3199  
// handleInitFundingMsg creates a channel reservation within the daemon's
// wallet, then sends a funding request to the remote peer kicking off the
// funding workflow.
//
// The sequence is: validate/normalize the request parameters, negotiate the
// channel commitment type with the peer, estimate the commitment fee,
// initialize a wallet reservation, index the reservation so later wire
// messages can find it, and finally send the OpenChannel message to the
// remote peer. Any failure is reported back to the caller over msg.Err.
func (f *Manager) handleInitFundingMsg(msg *InitFundingMsg) {
	var (
		peerKey        = msg.Peer.IdentityKey()
		localAmt       = msg.LocalFundingAmt
		minHtlcIn      = msg.MinHtlcIn
		remoteCsvDelay = msg.RemoteCsvDelay
		maxValue       = msg.MaxValueInFlight
		maxHtlcs       = msg.MaxHtlcs
		maxCSV         = msg.MaxLocalCsv
	)

	// If no maximum CSV delay was set for this channel, we use our default
	// value.
	if maxCSV == 0 {
		maxCSV = f.cfg.MaxLocalCSVDelay
	}

	log.Infof("Initiating fundingRequest(local_amt=%v "+
		"(subtract_fees=%v), push_amt=%v, chain_hash=%v, peer=%x, "+
		"min_confs=%v)", localAmt, msg.SubtractFees, msg.PushAmt,
		msg.ChainHash, peerKey.SerializeCompressed(), msg.MinConfs)

	// We set the channel flags to indicate whether we want this channel to
	// be announced to the network.
	var channelFlags lnwire.FundingFlag
	if !msg.Private {
		// This channel will be announced.
		channelFlags = lnwire.FFAnnounceChannel
	}

	// Record the peer address only for outbound connections, since inbound
	// connections are unlikely to be recoverable from our end.
	var peerAddr net.Addr
	if !msg.Peer.Inbound() {
		peerAddr = msg.Peer.Address()
	}

	// If the caller specified their own channel ID, then we'll use that.
	// Otherwise we'll generate a fresh one as normal.  This will be used
	// to track this reservation throughout its lifetime.
	var chanID [32]byte
	if msg.PendingChanID == zeroID {
		chanID = f.nextPendingChanID()
	} else {
		// If the user specified their own pending channel ID, then
		// we'll ensure it doesn't collide with any existing pending
		// channel ID.
		chanID = msg.PendingChanID
		if _, err := f.getReservationCtx(peerKey, chanID); err == nil {
			msg.Err <- fmt.Errorf("pendingChannelID(%x) "+
				"already present", chanID[:])
			return
		}
	}

	// Check whether the peer supports upfront shutdown, and get an address
	// which should be used (either a user specified address or a new
	// address from the wallet if our node is configured to set shutdown
	// address by default).
	shutdown, err := getUpfrontShutdownScript(
		f.cfg.EnableUpfrontShutdown, msg.Peer,
		msg.ShutdownScript,
		func() (lnwire.DeliveryAddress, error) {
			addr, err := f.cfg.Wallet.NewAddress(
				lnwallet.PubKeyHash, false,
				lnwallet.DefaultAccountName,
			)
			if err != nil {
				return nil, err
			}
			return input.PayToAddrScript(addr)
		},
	)
	if err != nil {
		msg.Err <- err
		return
	}

	// Initialize a funding reservation with the local wallet. If the
	// wallet doesn't have enough funds to commit to this channel, then the
	// request will fail, and be aborted.
	//
	// Before we init the channel, we'll also check to see what commitment
	// format we can use with this peer. This is dependent on *both* us and
	// the remote peer are signaling the proper feature bit.
	_, chanType, commitType, err := negotiateCommitmentType(
		msg.ChannelType, msg.Peer.LocalFeatures(),
		msg.Peer.RemoteFeatures(), true,
	)
	if err != nil {
		log.Errorf("channel type negotiation failed: %v", err)
		msg.Err <- err
		return
	}

	// First, we'll query the fee estimator for a fee that should get the
	// commitment transaction confirmed by the next few blocks (conf target
	// of 3). We target the near blocks here to ensure that we'll be able
	// to execute a timely unilateral channel closure if needed.
	commitFeePerKB, err := f.cfg.FeeEstimator.EstimateFeePerKB(3)
	if err != nil {
		msg.Err <- err
		return
	}

	// For anchor channels cap the initial commit fee rate at our defined
	// maximum.
	if commitType.HasAnchors() &&
		commitFeePerKB > f.cfg.MaxAnchorsCommitFeeRate {
		commitFeePerKB = f.cfg.MaxAnchorsCommitFeeRate
	}

	// Assemble the reservation request. RemoteFundingAmt is zero since
	// this is a single-funder workflow initiated by us.
	req := &lnwallet.InitFundingReserveMsg{
		ChainHash:        &msg.ChainHash,
		PendingChanID:    chanID,
		NodeID:           peerKey,
		NodeAddr:         peerAddr,
		SubtractFees:     msg.SubtractFees,
		LocalFundingAmt:  localAmt,
		RemoteFundingAmt: 0,
		CommitFeePerKB:   commitFeePerKB,
		FundingFeePerKB:  msg.FundingFeePerKB,
		PushMAtoms:       msg.PushAmt,
		Flags:            channelFlags,
		MinConfs:         msg.MinConfs,
		CommitType:       commitType,
		ChanFunder:       msg.ChanFunder,
	}

	reservation, err := f.cfg.Wallet.InitChannelReservation(req)
	if err != nil {
		msg.Err <- err
		return
	}

	// Set our upfront shutdown address in the existing reservation.
	reservation.SetOurUpfrontShutdown(shutdown)

	// Now that we have successfully reserved funds for this channel in the
	// wallet, we can fetch the final channel capacity. This is done at
	// this point since the final capacity might change in case of
	// SubtractFees=true.
	capacity := reservation.Capacity()

	log.Infof("Target commit tx atom/kB for pending_id(%x): %v", chanID,
		int64(commitFeePerKB))

	// If the remote CSV delay was not set in the open channel request,
	// we'll use the RequiredRemoteDelay closure to compute the delay we
	// require given the total amount of funds within the channel.
	if remoteCsvDelay == 0 {
		remoteCsvDelay = f.cfg.RequiredRemoteDelay(capacity)
	}

	// If no minimum HTLC value was specified, use the default one.
	if minHtlcIn == 0 {
		minHtlcIn = f.cfg.DefaultMinHtlcIn
	}

	// If no max value was specified, use the default one.
	if maxValue == 0 {
		maxValue = f.cfg.RequiredRemoteMaxValue(capacity)
	}

	if maxHtlcs == 0 {
		maxHtlcs = f.cfg.RequiredRemoteMaxHTLCs(capacity)
	}

	// If a pending channel map for this peer isn't already created, then
	// we create one, ultimately allowing us to track this pending
	// reservation within the target peer.
	peerIDKey := newSerializedKey(peerKey)
	f.resMtx.Lock()
	if _, ok := f.activeReservations[peerIDKey]; !ok {
		f.activeReservations[peerIDKey] = make(pendingChannels)
	}

	resCtx := &reservationWithCtx{
		chanAmt:        capacity,
		remoteCsvDelay: remoteCsvDelay,
		remoteMinHtlc:  minHtlcIn,
		remoteMaxValue: maxValue,
		remoteMaxHtlcs: maxHtlcs,
		maxLocalCsv:    maxCSV,
		channelType:    msg.ChannelType,
		reservation:    reservation,
		peer:           msg.Peer,
		updates:        msg.Updates,
		err:            msg.Err,
	}
	f.activeReservations[peerIDKey][chanID] = resCtx
	f.resMtx.Unlock()

	// Update the timestamp once the InitFundingMsg has been handled.
	defer resCtx.updateTimestamp()

	// Once the reservation has been created, and indexed, queue a funding
	// request to the remote peer, kicking off the funding workflow.
	ourContribution := reservation.OurContribution()

	// Fetch our dust limit which is part of the default channel
	// constraints, and log it.
	ourDustLimit := ourContribution.DustLimit

	log.Infof("Dust limit for pendingID(%x): %v", chanID, ourDustLimit)

	// Finally, we'll use the current value of the channels and our default
	// policy to determine of required commitment constraints for the
	// remote party.
	chanReserve := f.cfg.RequiredRemoteChanReserve(capacity, ourDustLimit)

	// When opening a script enforced channel lease, include the required
	// expiry TLV record in our proposal.
	var leaseExpiry *lnwire.LeaseExpiry
	if commitType == lnwallet.CommitmentTypeScriptEnforcedLease {
		leaseExpiry = new(lnwire.LeaseExpiry)
		*leaseExpiry = lnwire.LeaseExpiry(reservation.LeaseExpiry())
	}

	log.Infof("Starting funding workflow with %v for pending_id(%x), "+
		"committype=%v", msg.Peer.Address(), chanID, commitType)

	fundingOpen := lnwire.OpenChannel{
		ChainHash:             f.cfg.Wallet.Cfg.NetParams.GenesisHash,
		PendingChannelID:      chanID,
		FundingAmount:         capacity,
		PushAmount:            msg.PushAmt,
		DustLimit:             ourDustLimit,
		MaxValueInFlight:      maxValue,
		ChannelReserve:        chanReserve,
		HtlcMinimum:           minHtlcIn,
		FeePerKiloByte:        uint32(commitFeePerKB),
		CsvDelay:              remoteCsvDelay,
		MaxAcceptedHTLCs:      maxHtlcs,
		FundingKey:            ourContribution.MultiSigKey.PubKey,
		RevocationPoint:       ourContribution.RevocationBasePoint.PubKey,
		PaymentPoint:          ourContribution.PaymentBasePoint.PubKey,
		HtlcPoint:             ourContribution.HtlcBasePoint.PubKey,
		DelayedPaymentPoint:   ourContribution.DelayBasePoint.PubKey,
		FirstCommitmentPoint:  ourContribution.FirstCommitmentPoint,
		ChannelFlags:          channelFlags,
		UpfrontShutdownScript: shutdown,
		ChannelType:           chanType,
		LeaseExpiry:           leaseExpiry,
	}
	if err := msg.Peer.SendMessage(true, &fundingOpen); err != nil {
		e := fmt.Errorf("unable to send funding request message: %v",
			err)
		log.Errorf(e.Error())

		// Since we were unable to send the initial message to the peer
		// and start the funding flow, we'll cancel this reservation.
		_, err := f.cancelReservationCtx(peerKey, chanID, false)
		if err != nil {
			log.Errorf("unable to cancel reservation: %v", err)
		}

		msg.Err <- e
		return
	}
}
  3464  
  3465  // handleErrorMsg processes the error which was received from remote peer,
  3466  // depending on the type of error we should do different clean up steps and
  3467  // inform the user about it.
  3468  func (f *Manager) handleErrorMsg(peer lnpeer.Peer,
  3469  	msg *lnwire.Error) {
  3470  
  3471  	chanID := msg.ChanID
  3472  	peerKey := peer.IdentityKey()
  3473  
  3474  	// First, we'll attempt to retrieve and cancel the funding workflow
  3475  	// that this error was tied to. If we're unable to do so, then we'll
  3476  	// exit early as this was an unwarranted error.
  3477  	resCtx, err := f.cancelReservationCtx(peerKey, chanID, true)
  3478  	if err != nil {
  3479  		log.Warnf("Received error for non-existent funding "+
  3480  			"flow: %v (%v)", err, msg.Error())
  3481  		return
  3482  	}
  3483  
  3484  	// If we did indeed find the funding workflow, then we'll return the
  3485  	// error back to the caller (if any), and cancel the workflow itself.
  3486  	fundingErr := fmt.Errorf("received funding error from %x: %v",
  3487  		peerKey.SerializeCompressed(), msg.Error(),
  3488  	)
  3489  	log.Errorf(fundingErr.Error())
  3490  
  3491  	// If this was a PSBT funding flow, the remote likely timed out because
  3492  	// we waited too long. Return a nice error message to the user in that
  3493  	// case so the user knows what's the problem.
  3494  	if resCtx.reservation.IsPsbt() {
  3495  		fundingErr = fmt.Errorf("%w: %v", chanfunding.ErrRemoteCanceled,
  3496  			fundingErr)
  3497  	}
  3498  
  3499  	resCtx.err <- fundingErr
  3500  }
  3501  
  3502  // pruneZombieReservations loops through all pending reservations and fails the
  3503  // funding flow for any reservations that have not been updated since the
  3504  // ReservationTimeout and are not locked waiting for the funding transaction.
  3505  func (f *Manager) pruneZombieReservations() {
  3506  	zombieReservations := make(pendingChannels)
  3507  
  3508  	f.resMtx.RLock()
  3509  	for _, pendingReservations := range f.activeReservations {
  3510  		for pendingChanID, resCtx := range pendingReservations {
  3511  			locked, _ := resCtx.isLocked()
  3512  			if locked {
  3513  				continue
  3514  			}
  3515  
  3516  			// We don't want to expire PSBT funding reservations.
  3517  			// These reservations are always initiated by us and
  3518  			// the remote peer is likely going to cancel them after
  3519  			// some idle time anyway. So no need for us to also
  3520  			// prune them.
  3521  			sinceLastUpdate := time.Since(resCtx.lastUpdated)
  3522  			isExpired := sinceLastUpdate > f.cfg.ReservationTimeout
  3523  			if !resCtx.reservation.IsPsbt() && isExpired {
  3524  				zombieReservations[pendingChanID] = resCtx
  3525  			}
  3526  		}
  3527  	}
  3528  	f.resMtx.RUnlock()
  3529  
  3530  	for pendingChanID, resCtx := range zombieReservations {
  3531  		err := fmt.Errorf("reservation timed out waiting for peer "+
  3532  			"(peer_id:%x, chan_id:%x)", resCtx.peer.IdentityKey().SerializeCompressed(),
  3533  			pendingChanID[:])
  3534  		log.Warnf(err.Error())
  3535  		f.failFundingFlow(resCtx.peer, pendingChanID, err)
  3536  	}
  3537  }
  3538  
  3539  // cancelReservationCtx does all needed work in order to securely cancel the
  3540  // reservation.
  3541  func (f *Manager) cancelReservationCtx(peerKey *secp256k1.PublicKey,
  3542  	pendingChanID [32]byte, byRemote bool) (*reservationWithCtx, error) {
  3543  
  3544  	log.Infof("Cancelling funding reservation for node_key=%x, "+
  3545  		"chan_id=%x", peerKey.SerializeCompressed(), pendingChanID[:])
  3546  
  3547  	peerIDKey := newSerializedKey(peerKey)
  3548  	f.resMtx.Lock()
  3549  	defer f.resMtx.Unlock()
  3550  
  3551  	nodeReservations, ok := f.activeReservations[peerIDKey]
  3552  	if !ok {
  3553  		// No reservations for this node.
  3554  		return nil, errors.Errorf("no active reservations for peer(%x)",
  3555  			peerIDKey[:])
  3556  	}
  3557  
  3558  	ctx, ok := nodeReservations[pendingChanID]
  3559  	if !ok {
  3560  		return nil, errors.Errorf("unknown channel (id: %x) for "+
  3561  			"peer(%x)", pendingChanID[:], peerIDKey[:])
  3562  	}
  3563  
  3564  	// If the reservation was a PSBT funding flow and it was canceled by the
  3565  	// remote peer, then we need to thread through a different error message
  3566  	// to the subroutine that's waiting for the user input so it can return
  3567  	// a nice error message to the user.
  3568  	if ctx.reservation.IsPsbt() && byRemote {
  3569  		ctx.reservation.RemoteCanceled()
  3570  	}
  3571  
  3572  	if err := ctx.reservation.Cancel(); err != nil {
  3573  		return nil, errors.Errorf("unable to cancel reservation: %v",
  3574  			err)
  3575  	}
  3576  
  3577  	delete(nodeReservations, pendingChanID)
  3578  
  3579  	// If this was the last active reservation for this peer, delete the
  3580  	// peer's entry altogether.
  3581  	if len(nodeReservations) == 0 {
  3582  		delete(f.activeReservations, peerIDKey)
  3583  	}
  3584  	return ctx, nil
  3585  }
  3586  
  3587  // deleteReservationCtx deletes the reservation uniquely identified by the
  3588  // target public key of the peer, and the specified pending channel ID.
  3589  func (f *Manager) deleteReservationCtx(peerKey *secp256k1.PublicKey,
  3590  	pendingChanID [32]byte) {
  3591  
  3592  	// TODO(roasbeef): possibly cancel funding barrier in peer's
  3593  	// channelManager?
  3594  	peerIDKey := newSerializedKey(peerKey)
  3595  	f.resMtx.Lock()
  3596  	defer f.resMtx.Unlock()
  3597  
  3598  	nodeReservations, ok := f.activeReservations[peerIDKey]
  3599  	if !ok {
  3600  		// No reservations for this node.
  3601  		return
  3602  	}
  3603  	delete(nodeReservations, pendingChanID)
  3604  
  3605  	// If this was the last active reservation for this peer, delete the
  3606  	// peer's entry altogether.
  3607  	if len(nodeReservations) == 0 {
  3608  		delete(f.activeReservations, peerIDKey)
  3609  	}
  3610  }
  3611  
  3612  // getReservationCtx returns the reservation context for a particular pending
  3613  // channel ID for a target peer.
  3614  func (f *Manager) getReservationCtx(peerKey *secp256k1.PublicKey,
  3615  	pendingChanID [32]byte) (*reservationWithCtx, error) {
  3616  
  3617  	peerIDKey := newSerializedKey(peerKey)
  3618  	f.resMtx.RLock()
  3619  	resCtx, ok := f.activeReservations[peerIDKey][pendingChanID]
  3620  	f.resMtx.RUnlock()
  3621  
  3622  	if !ok {
  3623  		return nil, errors.Errorf("unknown channel (id: %x) for "+
  3624  			"peer(%x)", pendingChanID[:], peerIDKey[:])
  3625  	}
  3626  
  3627  	return resCtx, nil
  3628  }
  3629  
  3630  // IsPendingChannel returns a boolean indicating whether the channel identified
  3631  // by the pendingChanID and given peer is pending, meaning it is in the process
  3632  // of being funded. After the funding transaction has been confirmed, the
  3633  // channel will receive a new, permanent channel ID, and will no longer be
  3634  // considered pending.
  3635  func (f *Manager) IsPendingChannel(pendingChanID [32]byte,
  3636  	peer lnpeer.Peer) bool {
  3637  
  3638  	peerIDKey := newSerializedKey(peer.IdentityKey())
  3639  	f.resMtx.RLock()
  3640  	_, ok := f.activeReservations[peerIDKey][pendingChanID]
  3641  	f.resMtx.RUnlock()
  3642  
  3643  	return ok
  3644  }
  3645  
  3646  func copyPubKey(pub *secp256k1.PublicKey) *secp256k1.PublicKey {
  3647  	c := *pub
  3648  	return &c
  3649  }
  3650  
  3651  // saveChannelOpeningState saves the channelOpeningState for the provided
  3652  // chanPoint to the channelOpeningStateBucket.
  3653  func (f *Manager) saveChannelOpeningState(chanPoint *wire.OutPoint,
  3654  	state channelOpeningState, shortChanID *lnwire.ShortChannelID) error {
  3655  
  3656  	var outpointBytes bytes.Buffer
  3657  	if err := writeOutpoint(&outpointBytes, chanPoint); err != nil {
  3658  		return err
  3659  	}
  3660  
  3661  	// Save state and the uint64 representation of the shortChanID
  3662  	// for later use.
  3663  	scratch := make([]byte, 10)
  3664  	byteOrder.PutUint16(scratch[:2], uint16(state))
  3665  	byteOrder.PutUint64(scratch[2:], shortChanID.ToUint64())
  3666  	return f.cfg.Wallet.Cfg.Database.SaveChannelOpeningState(
  3667  		outpointBytes.Bytes(), scratch,
  3668  	)
  3669  }
  3670  
  3671  // getChannelOpeningState fetches the channelOpeningState for the provided
  3672  // chanPoint from the database, or returns ErrChannelNotFound if the channel
  3673  // is not found.
  3674  func (f *Manager) getChannelOpeningState(chanPoint *wire.OutPoint) (
  3675  	channelOpeningState, *lnwire.ShortChannelID, error) {
  3676  
  3677  	var outpointBytes bytes.Buffer
  3678  	if err := writeOutpoint(&outpointBytes, chanPoint); err != nil {
  3679  		return 0, nil, err
  3680  	}
  3681  
  3682  	value, err := f.cfg.Wallet.Cfg.Database.GetChannelOpeningState(
  3683  		outpointBytes.Bytes(),
  3684  	)
  3685  	if err != nil {
  3686  		return 0, nil, err
  3687  	}
  3688  
  3689  	state := channelOpeningState(byteOrder.Uint16(value[:2]))
  3690  	shortChanID := lnwire.NewShortChanIDFromInt(byteOrder.Uint64(value[2:]))
  3691  	return state, &shortChanID, nil
  3692  }
  3693  
  3694  // deleteChannelOpeningState removes any state for chanPoint from the database.
  3695  func (f *Manager) deleteChannelOpeningState(chanPoint *wire.OutPoint) error {
  3696  	var outpointBytes bytes.Buffer
  3697  	if err := writeOutpoint(&outpointBytes, chanPoint); err != nil {
  3698  		return err
  3699  	}
  3700  
  3701  	return f.cfg.Wallet.Cfg.Database.DeleteChannelOpeningState(
  3702  		outpointBytes.Bytes(),
  3703  	)
  3704  }