github.com/decred/dcrlnd@v0.7.6/lnwallet/channel.go

     1  package lnwallet
     2  
     3  import (
     4  	"bytes"
     5  	"container/list"
     6  	"crypto/sha256"
     7  	"errors"
     8  	"fmt"
     9  	"math"
    10  	"sort"
    11  	"sync"
    12  
    13  	"github.com/davecgh/go-spew/spew"
    14  	"github.com/decred/slog"
    15  
    16  	"github.com/decred/dcrd/blockchain/standalone/v2"
    17  	"github.com/decred/dcrd/chaincfg/chainhash"
    18  	"github.com/decred/dcrd/chaincfg/v3"
    19  	"github.com/decred/dcrd/dcrec/secp256k1/v4"
    20  	"github.com/decred/dcrd/dcrec/secp256k1/v4/ecdsa"
    21  	"github.com/decred/dcrd/dcrutil/v4"
    22  	"github.com/decred/dcrd/dcrutil/v4/txsort"
    23  	"github.com/decred/dcrd/txscript/v4"
    24  	"github.com/decred/dcrd/wire"
    25  
    26  	"github.com/decred/dcrlnd/build"
    27  	"github.com/decred/dcrlnd/chainntnfs"
    28  	"github.com/decred/dcrlnd/channeldb"
    29  	"github.com/decred/dcrlnd/input"
    30  	"github.com/decred/dcrlnd/lntypes"
    31  	"github.com/decred/dcrlnd/lnwallet/chainfee"
    32  	"github.com/decred/dcrlnd/lnwire"
    33  	"github.com/decred/dcrlnd/shachain"
    34  )
    35  
    36  const scriptVersion uint16 = 0
    37  
    38  // disableFeeFloorCheck is used in certain tests to disable the fee floor
    39  // check. This is required because some tests were ported from Bitcoin, which
    40  // has a lower fee floor that would prevent the channel from correctly
    41  // advancing in Decred.
    42  var disableFeeFloorCheck = false
    43  
    44  var (
    45  	// ErrChanClosing is returned when a caller attempts to close a channel
    46  	// that has already been closed or is in the process of being closed.
    47  	ErrChanClosing = fmt.Errorf("channel is being closed, operation disallowed")
    48  
    49  	// ErrNoWindow is returned when revocation window is exhausted.
    50  	ErrNoWindow = fmt.Errorf("unable to sign new commitment, the current" +
    51  		" revocation window is exhausted")
    52  
    53  	// ErrMaxSizeCost is returned when the cost/size of the commitment
    54  	// transaction exceeds the widely used maximum allowed policy size
    55  	// limit. In this case the commitment transaction can't be propagated
    56  	// through the network.
    57  	ErrMaxSizeCost = fmt.Errorf("commitment transaction exceeds max " +
    58  		"available cost")
    59  
    60  	// ErrMaxHTLCNumber is returned when a proposed HTLC would exceed the
    61  	// maximum number of allowed HTLCs if committed in a state transition.
    62  	ErrMaxHTLCNumber = fmt.Errorf("commitment transaction exceeds max " +
    63  		"htlc number")
    64  
    65  	// ErrMaxPendingAmount is returned when a proposed HTLC would exceed
    66  	// the overall maximum pending value of all HTLCs if committed in a
    67  	// state transition.
    68  	ErrMaxPendingAmount = fmt.Errorf("commitment transaction exceeds max " +
    69  		"overall pending htlc value")
    70  
    71  	// ErrBelowChanReserve is returned when a proposed HTLC would cause
    72  	// one of the peer's funds to dip below the channel reserve limit.
    73  	ErrBelowChanReserve = fmt.Errorf("commitment transaction dips peer " +
    74  		"below chan reserve")
    75  
    76  	// ErrBelowMinHTLC is returned when a proposed HTLC has a value that
    77  	// is below the minimum HTLC value constraint for either us or our
    78  	// peer depending on which flags are set.
    79  	ErrBelowMinHTLC = fmt.Errorf("proposed HTLC value is below minimum " +
    80  		"allowed HTLC value")
    81  
    82  	// ErrInvalidHTLCAmt signals that a proposed HTLC has a value that is
    83  	// not positive.
    84  	ErrInvalidHTLCAmt = fmt.Errorf("proposed HTLC value must be positive")
    85  
    86  	// ErrCannotSyncCommitChains is returned if, upon receiving a ChanSync
    87  	// message, the state machine deems that it is unable to properly
    88  	// synchronize states with the remote peer. In this case we should fail
    89  	// the channel, but we won't automatically force close.
    90  	ErrCannotSyncCommitChains = fmt.Errorf("unable to sync commit chains")
    91  
    92  	// ErrInvalidLastCommitSecret is returned in the case that the
    93  	// commitment secret sent by the remote party in their
    94  	// ChannelReestablish message doesn't match the last secret we sent.
    95  	ErrInvalidLastCommitSecret = fmt.Errorf("commit secret is incorrect")
    96  
    97  	// ErrInvalidLocalUnrevokedCommitPoint is returned in the case that the
    98  	// commitment point sent by the remote party in their
    99  	// ChannelReestablish message doesn't match the last unrevoked commit
   100  	// point they sent us.
   101  	ErrInvalidLocalUnrevokedCommitPoint = fmt.Errorf("unrevoked commit " +
   102  		"point is invalid")
   103  
   104  	// ErrCommitSyncRemoteDataLoss is returned in the case that we receive
   105  	// a ChannelReestablish message from the remote that advertises a
   106  	// NextLocalCommitHeight that is lower than what they have already
   107  	// ACKed, or a RemoteCommitTailHeight that is lower than our revoked
   108  	// height. In this case we should force close the channel such that
   109  	// both parties can retrieve their funds.
   110  	ErrCommitSyncRemoteDataLoss = fmt.Errorf("possible remote commitment " +
   111  		"state data loss")
   112  )
   113  
   114  // ErrCommitSyncLocalDataLoss is returned in the case that we receive a valid
   115  // commit secret within the ChannelReestablish message from the remote node AND
   116  // they advertise a RemoteCommitTailHeight higher than our current known
   117  // height. This means we have lost some critical data, and must fail the
   118  // channel and MUST NOT force close it. Instead we should wait for the remote
   119  // to force close it, such that we can attempt to sweep our funds. The
    120  // commitment point needed to sweep the remote's force close is encapsulated.
   121  type ErrCommitSyncLocalDataLoss struct {
   122  	// ChannelPoint is the identifier for the channel that experienced data
   123  	// loss.
   124  	ChannelPoint wire.OutPoint
   125  
   126  	// CommitPoint is the last unrevoked commit point, sent to us by the
   127  	// remote when we determined we had lost state.
   128  	CommitPoint *secp256k1.PublicKey
   129  }
   130  
   131  // Error returns a string representation of the local data loss error.
   132  func (e *ErrCommitSyncLocalDataLoss) Error() string {
   133  	return fmt.Sprintf("ChannelPoint(%v) with CommitPoint(%x) had "+
   134  		"possible local commitment state data loss", e.ChannelPoint,
   135  		e.CommitPoint.SerializeCompressed())
   136  }
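
        // Since ErrCommitSyncLocalDataLoss is a struct type rather than a
        // sentinel value, callers typically detect it with errors.As. A
        // minimal sketch (assuming err came from a failed channel sync):
        //
        //	var dataLoss *ErrCommitSyncLocalDataLoss
        //	if errors.As(err, &dataLoss) {
        //		// Don't force close. Wait for the remote to close, then
        //		// sweep our funds using dataLoss.CommitPoint.
        //	}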
   137  
   138  // channelState is an enum like type which represents the current state of a
   139  // particular channel.
   140  // TODO(roasbeef): actually update state
   141  type channelState uint8
   142  
   143  const (
   144  	// channelPending indicates this channel is still going through the
   145  	// funding workflow, and isn't yet open.
   146  	channelPending channelState = iota // nolint: unused
   147  
   148  	// channelOpen represents an open, active channel capable of
   149  	// sending/receiving HTLCs.
   150  	channelOpen
   151  
   152  	// channelClosing represents a channel which is in the process of being
   153  	// closed.
   154  	channelClosing
   155  
   156  	// channelClosed represents a channel which has been fully closed. Note
   157  	// that before a channel can be closed, ALL pending HTLCs must be
   158  	// settled/removed.
   159  	channelClosed
   160  
   161  	// channelDispute indicates that an un-cooperative closure has been
   162  	// detected within the channel.
   163  	channelDispute
   164  
    165  	// channelPendingPayment indicates that there are currently
    166  	// outstanding HTLCs within the channel.
   167  	channelPendingPayment // nolint:unused
   168  )
   169  
   170  // PaymentHash represents the sha256 of a random value. This hash is used to
   171  // uniquely track incoming/outgoing payments within this channel, as well as
   172  // payments requested by the wallet/daemon.
   173  type PaymentHash [32]byte
   174  
   175  // updateType is the exact type of an entry within the shared HTLC log.
   176  type updateType uint8
   177  
   178  const (
   179  	// Add is an update type that adds a new HTLC entry into the log.
   180  	// Either side can add a new pending HTLC by adding a new Add entry
   181  	// into their update log.
   182  	Add updateType = iota
   183  
   184  	// Fail is an update type which removes a prior HTLC entry from the
    185  	// log. Adding a Fail entry to one's log will modify the _remote_
    186  	// party's update log once a new commitment view has been evaluated
   187  	// which contains the Fail entry.
   188  	Fail
   189  
   190  	// MalformedFail is an update type which removes a prior HTLC entry
    191  	// from the log. Adding a MalformedFail entry to one's log will modify
    192  	// the _remote_ party's update log once a new commitment view has been
    193  	// evaluated which contains the MalformedFail entry. The difference
    194  	// from the Fail type lies in the data we have to store.
   195  	MalformedFail
   196  
   197  	// Settle is an update type which settles a prior HTLC crediting the
   198  	// balance of the receiving node. Adding a Settle entry to a log will
    199  	// result in the settle entry being removed from the log as well as the
   200  	// original add entry from the remote party's log after the next state
   201  	// transition.
   202  	Settle
   203  
   204  	// FeeUpdate is an update type sent by the channel initiator that
   205  	// updates the fee rate used when signing the commitment transaction.
   206  	FeeUpdate
   207  )
   208  
   209  // String returns a human readable string that uniquely identifies the target
   210  // update type.
   211  func (u updateType) String() string {
   212  	switch u {
   213  	case Add:
   214  		return "Add"
   215  	case Fail:
   216  		return "Fail"
   217  	case MalformedFail:
   218  		return "MalformedFail"
   219  	case Settle:
   220  		return "Settle"
   221  	case FeeUpdate:
   222  		return "FeeUpdate"
   223  	default:
   224  		return "<unknown type>"
   225  	}
   226  }
   227  
   228  // PaymentDescriptor represents a commitment state update which either adds,
   229  // settles, or removes an HTLC. PaymentDescriptors encapsulate all necessary
    230  // metadata w.r.t. an HTLC, and additional data pairing a settle message to
   231  // the original added HTLC.
   232  //
   233  // TODO(roasbeef): LogEntry interface??
   234  //   - need to separate attrs for cancel/add/settle/feeupdate
   235  type PaymentDescriptor struct {
   236  	// RHash is the payment hash for this HTLC. The HTLC can be settled iff
   237  	// the preimage to this hash is presented.
   238  	RHash PaymentHash
   239  
   240  	// RPreimage is the preimage that settles the HTLC pointed to within the
   241  	// log by the ParentIndex.
   242  	RPreimage lntypes.Preimage
   243  
   244  	// Timeout is the absolute timeout in blocks, after which this HTLC
   245  	// expires.
   246  	Timeout uint32
   247  
   248  	// Amount is the HTLC amount in milli-atoms.
   249  	Amount lnwire.MilliAtom
   250  
    251  	// LogIndex is the log entry number that this HTLC update has within the
   252  	// log. Depending on if IsIncoming is true, this is either an entry the
   253  	// remote party added, or one that we added locally.
   254  	LogIndex uint64
   255  
   256  	// HtlcIndex is the index within the main update log for this HTLC.
   257  	// Entries within the log of type Add will have this field populated,
   258  	// as other entries will point to the entry via this counter.
   259  	//
    260  	// NOTE: This field will only be populated if EntryType is Add.
   261  	HtlcIndex uint64
   262  
   263  	// ParentIndex is the HTLC index of the entry that this update settles or
   264  	// times out.
   265  	//
    266  	// NOTE: This field will only be populated if EntryType is Fail or
   267  	// Settle.
   268  	ParentIndex uint64
   269  
   270  	// SourceRef points to an Add update in a forwarding package owned by
   271  	// this channel.
   272  	//
   273  	// NOTE: This field will only be populated if EntryType is Fail or
   274  	// Settle.
   275  	SourceRef *channeldb.AddRef
   276  
   277  	// DestRef points to a Fail/Settle update in another link's forwarding
   278  	// package.
   279  	//
   280  	// NOTE: This field will only be populated if EntryType is Fail or
    281  	// Settle, and the forwarded Add was successfully included in an
    282  	// outgoing link's commitment txn.
   283  	DestRef *channeldb.SettleFailRef
   284  
   285  	// OpenCircuitKey references the incoming Chan/HTLC ID of an Add HTLC
   286  	// packet delivered by the switch.
   287  	//
   288  	// NOTE: This field is only populated for payment descriptors in the
   289  	// *local* update log, and if the Add packet was delivered by the
   290  	// switch.
   291  	OpenCircuitKey *channeldb.CircuitKey
   292  
   293  	// ClosedCircuitKey references the incoming Chan/HTLC ID of the Add HTLC
   294  	// that opened the circuit.
   295  	//
   296  	// NOTE: This field is only populated for payment descriptors in the
   297  	// *local* update log, and if settle/fails have a committed circuit in
   298  	// the circuit map.
   299  	ClosedCircuitKey *channeldb.CircuitKey
   300  
   301  	// localOutputIndex is the output index of this HTLC output in the
   302  	// commitment transaction of the local node.
   303  	//
   304  	// NOTE: If the output is dust from the PoV of the local commitment
   305  	// chain, then this value will be -1.
   306  	localOutputIndex int32
   307  
   308  	// remoteOutputIndex is the output index of this HTLC output in the
   309  	// commitment transaction of the remote node.
   310  	//
   311  	// NOTE: If the output is dust from the PoV of the remote commitment
   312  	// chain, then this value will be -1.
   313  	remoteOutputIndex int32
   314  
   315  	// sig is the signature for the second-level HTLC transaction that
   316  	// spends the version of this HTLC on the commitment transaction of the
   317  	// local node. This signature is generated by the remote node and
   318  	// stored by the local node in the case that local node needs to
   319  	// broadcast their commitment transaction.
   320  	sig *ecdsa.Signature
   321  
   322  	// addCommitHeight[Remote|Local] encodes the height of the commitment
   323  	// which included this HTLC on either the remote or local commitment
   324  	// chain. This value is used to determine when an HTLC is fully
   325  	// "locked-in".
   326  	addCommitHeightRemote uint64
   327  	addCommitHeightLocal  uint64
   328  
   329  	// removeCommitHeight[Remote|Local] encodes the height of the
   330  	// commitment which removed the parent pointer of this
   331  	// PaymentDescriptor either due to a timeout or a settle. Once both
   332  	// these heights are below the tail of both chains, the log entries can
   333  	// safely be removed.
   334  	removeCommitHeightRemote uint64
   335  	removeCommitHeightLocal  uint64
   336  
   337  	// OnionBlob is an opaque blob which is used to complete multi-hop
   338  	// routing.
   339  	//
   340  	// NOTE: Populated only on add payment descriptor entry types.
   341  	OnionBlob []byte
   342  
    343  	// ShaOnionBlob is the SHA-256 hash of the onion blob.
   344  	//
   345  	// NOTE: Populated only in payment descriptor with MalformedFail type.
   346  	ShaOnionBlob [sha256.Size]byte
   347  
   348  	// FailReason stores the reason why a particular payment was canceled.
   349  	//
    350  	// NOTE: Populated only in fail payment descriptor entry types.
   351  	FailReason []byte
   352  
    353  	// FailCode stores the code for why a particular payment was canceled.
   354  	//
   355  	// NOTE: Populated only in payment descriptor with MalformedFail type.
   356  	FailCode lnwire.FailCode
   357  
    358  	// [our|their]PkScript are the raw public key scripts that encode the
   359  	// redemption rules for this particular HTLC. These fields will only be
   360  	// populated iff the EntryType of this PaymentDescriptor is Add.
   361  	// ourPkScript is the ourPkScript from the context of our local
   362  	// commitment chain. theirPkScript is the latest pkScript from the
   363  	// context of the remote commitment chain.
   364  	//
   365  	// NOTE: These values may change within the logs themselves, however,
   366  	// they'll stay consistent within the commitment chain entries
   367  	// themselves.
   368  	ourPkScript        []byte
   369  	ourWitnessScript   []byte
   370  	theirPkScript      []byte
   371  	theirWitnessScript []byte
   372  
   373  	// EntryType denotes the exact type of the PaymentDescriptor. In the
   374  	// case of a Timeout, or Settle type, then the Parent field will point
   375  	// into the log to the HTLC being modified.
   376  	EntryType updateType
   377  
   378  	// isForwarded denotes if an incoming HTLC has been forwarded to any
   379  	// possible upstream peers in the route.
   380  	isForwarded bool
   381  }
   382  
   383  // PayDescsFromRemoteLogUpdates converts a slice of LogUpdates received from the
   384  // remote peer into PaymentDescriptors to inform a link's forwarding decisions.
   385  //
    386  // NOTE: The provided `logUpdates` MUST correspond exactly to either the Adds
   387  // or SettleFails in this channel's forwarding package at `height`.
   388  func PayDescsFromRemoteLogUpdates(chanID lnwire.ShortChannelID, height uint64,
   389  	logUpdates []channeldb.LogUpdate) ([]*PaymentDescriptor, error) {
   390  
   391  	// Allocate enough space to hold all of the payment descriptors we will
   392  	// reconstruct, and also the list of pointers that will be returned to
   393  	// the caller.
   394  	payDescs := make([]PaymentDescriptor, 0, len(logUpdates))
   395  	payDescPtrs := make([]*PaymentDescriptor, 0, len(logUpdates))
   396  
   397  	// Iterate over the log updates we loaded from disk, and reconstruct the
   398  	// payment descriptor corresponding to one of the four types of htlcs we
   399  	// can receive from the remote peer. We only repopulate the information
   400  	// necessary to process the packets and, if necessary, forward them to
   401  	// the switch.
   402  	//
   403  	// For each log update, we include either an AddRef or a SettleFailRef
   404  	// so that they can be ACK'd and garbage collected.
   405  	for i, logUpdate := range logUpdates {
   406  		var pd PaymentDescriptor
   407  		switch wireMsg := logUpdate.UpdateMsg.(type) {
   408  
   409  		case *lnwire.UpdateAddHTLC:
   410  			pd = PaymentDescriptor{
   411  				RHash:     wireMsg.PaymentHash,
   412  				Timeout:   wireMsg.Expiry,
   413  				Amount:    wireMsg.Amount,
   414  				EntryType: Add,
   415  				HtlcIndex: wireMsg.ID,
   416  				LogIndex:  logUpdate.LogIndex,
   417  				SourceRef: &channeldb.AddRef{
   418  					Height: height,
   419  					Index:  uint16(i),
   420  				},
   421  			}
   422  			pd.OnionBlob = make([]byte, len(wireMsg.OnionBlob))
   423  			copy(pd.OnionBlob, wireMsg.OnionBlob[:])
   424  
   425  		case *lnwire.UpdateFulfillHTLC:
   426  			pd = PaymentDescriptor{
   427  				RPreimage:   wireMsg.PaymentPreimage,
   428  				ParentIndex: wireMsg.ID,
   429  				EntryType:   Settle,
   430  				DestRef: &channeldb.SettleFailRef{
   431  					Source: chanID,
   432  					Height: height,
   433  					Index:  uint16(i),
   434  				},
   435  			}
   436  
   437  		case *lnwire.UpdateFailHTLC:
   438  			pd = PaymentDescriptor{
   439  				ParentIndex: wireMsg.ID,
   440  				EntryType:   Fail,
   441  				FailReason:  wireMsg.Reason[:],
   442  				DestRef: &channeldb.SettleFailRef{
   443  					Source: chanID,
   444  					Height: height,
   445  					Index:  uint16(i),
   446  				},
   447  			}
   448  
   449  		case *lnwire.UpdateFailMalformedHTLC:
   450  			pd = PaymentDescriptor{
   451  				ParentIndex:  wireMsg.ID,
   452  				EntryType:    MalformedFail,
   453  				FailCode:     wireMsg.FailureCode,
   454  				ShaOnionBlob: wireMsg.ShaOnionBlob,
   455  				DestRef: &channeldb.SettleFailRef{
   456  					Source: chanID,
   457  					Height: height,
   458  					Index:  uint16(i),
   459  				},
   460  			}
   461  
   462  		// NOTE: UpdateFee is not expected since they are not forwarded.
   463  		case *lnwire.UpdateFee:
   464  			return nil, fmt.Errorf("unexpected update fee")
   465  
   466  		}
   467  
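        		// Note: payDescs was allocated with capacity
        		// len(logUpdates), so these appends never reallocate the
        		// backing array and the pointer taken into payDescs below
        		// remains valid for the lifetime of the slice.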
   468  		payDescs = append(payDescs, pd)
   469  		payDescPtrs = append(payDescPtrs, &payDescs[i])
   470  	}
   471  
   472  	return payDescPtrs, nil
   473  }
   474  
   475  // commitment represents a commitment to a new state within an active channel.
   476  // New commitments can be initiated by either side. Commitments are ordered
   477  // into a commitment chain, with one existing for both parties. Each side can
   478  // independently extend the other side's commitment chain, up to a certain
   479  // "revocation window", which once reached, disallows new commitments until
    480  // the local node receives the revocation for the remote node's chain tail.
   481  type commitment struct {
   482  	// height represents the commitment height of this commitment, or the
   483  	// update number of this commitment.
   484  	height uint64
   485  
   486  	// isOurs indicates whether this is the local or remote node's version
   487  	// of the commitment.
   488  	isOurs bool
   489  
   490  	// [our|their]MessageIndex are indexes into the HTLC log, up to which
   491  	// this commitment transaction includes. These indexes allow both sides
    492  	// to independently and concurrently create new commitments. Each
   493  	// new commitment sent to the remote party includes an index in the
   494  	// shared log which details which of their updates we're including in
   495  	// this new commitment.
   496  	ourMessageIndex   uint64
   497  	theirMessageIndex uint64
   498  
    499  	// [our|their]HtlcIndex are the current running counters for the HTLCs
    500  	// offered by either party. This value is incremented each time a party
    501  	// offers a new HTLC. The log update methods that consume HTLCs will
   502  	// reference these counters, rather than the running cumulative message
   503  	// counters.
   504  	ourHtlcIndex   uint64
   505  	theirHtlcIndex uint64
   506  
   507  	// txn is the commitment transaction generated by including any HTLC
   508  	// updates whose index are below the two indexes listed above. If this
   509  	// commitment is being added to the remote chain, then this txn is
   510  	// their version of the commitment transactions. If the local commit
   511  	// chain is being modified, the opposite is true.
   512  	txn *wire.MsgTx
   513  
   514  	// sig is a signature for the above commitment transaction.
   515  	sig []byte
   516  
   517  	// [our|their]Balance represents the settled balances at this point
   518  	// within the commitment chain. This balance is computed by properly
   519  	// evaluating all the add/remove/settle log entries before the listed
   520  	// indexes.
   521  	//
   522  	// NOTE: This is the balance *after* subtracting any commitment fee,
   523  	// AND anchor output values.
   524  	ourBalance   lnwire.MilliAtom
   525  	theirBalance lnwire.MilliAtom
   526  
   527  	// fee is the amount that will be paid as fees for this commitment
   528  	// transaction. The fee is recorded here so that it can be added back
   529  	// and recalculated for each new update to the channel state.
   530  	fee dcrutil.Amount
   531  
    532  	// feePerKB is the fee per kilobyte used to calculate this commitment
   533  	// transaction's fee.
   534  	feePerKB chainfee.AtomPerKByte
   535  
   536  	// dustLimit is the limit on the commitment transaction such that no
   537  	// output values should be below this amount.
   538  	dustLimit dcrutil.Amount
   539  
    540  	// outgoingHTLCs is a slice of all the outgoing HTLCs (from our PoV)
   541  	// on this commitment transaction.
   542  	outgoingHTLCs []PaymentDescriptor
   543  
    544  	// incomingHTLCs is a slice of all the incoming HTLCs (from our PoV)
   545  	// on this commitment transaction.
   546  	incomingHTLCs []PaymentDescriptor
   547  
   548  	// [outgoing|incoming]HTLCIndex is an index that maps an output index
   549  	// on the commitment transaction to the payment descriptor that
   550  	// represents the HTLC output.
   551  	//
    552  	// NOTE: These fields are only populated if this commitment state
    553  	// belongs to the local node. These maps are used when validating any
    554  	// HTLC signatures which are part of the local commitment state. We use
    555  	// this map in order to locate the details needed to validate an HTLC
    556  	// signature while iterating over the outputs in the local commitment
    557  	// view.
   558  	outgoingHTLCIndex map[int32]*PaymentDescriptor
   559  	incomingHTLCIndex map[int32]*PaymentDescriptor
   560  }
   561  
   562  // locateOutputIndex is a small helper function to locate the output index of a
   563  // particular HTLC within the current commitment transaction. The duplicate map
    564  // passed in is to be retained for each output within the commitment
    565  // transaction. This ensures that we don't assign multiple HTLCs to the same
    566  // index within the commitment transaction.
   567  func locateOutputIndex(p *PaymentDescriptor, tx *wire.MsgTx, ourCommit bool,
   568  	dups map[PaymentHash][]int32, cltvs []uint32) (int32, error) {
   569  
   570  	// Checks to see if element (e) exists in slice (s).
   571  	contains := func(s []int32, e int32) bool {
   572  		for _, a := range s {
   573  			if a == e {
   574  				return true
   575  			}
   576  		}
   577  		return false
   578  	}
   579  
    580  	// If this is their commitment transaction, we'll be trying to locate
   581  	// their pkScripts, otherwise we'll be looking for ours. This is
   582  	// required as the commitment states are asymmetric in order to ascribe
   583  	// blame in the case of a contract breach.
   584  	pkScript := p.theirPkScript
   585  	if ourCommit {
   586  		pkScript = p.ourPkScript
   587  	}
   588  
   589  	for i, txOut := range tx.TxOut {
   590  		cltv := cltvs[i]
   591  
   592  		if bytes.Equal(txOut.PkScript, pkScript) &&
   593  			txOut.Value == int64(p.Amount.ToAtoms()) &&
   594  			cltv == p.Timeout {
   595  
   596  			// If this payment hash and index has already been
   597  			// found, then we'll continue in order to avoid any
   598  			// duplicate indexes.
   599  			if contains(dups[p.RHash], int32(i)) {
   600  				continue
   601  			}
   602  
   603  			idx := int32(i)
   604  			dups[p.RHash] = append(dups[p.RHash], idx)
   605  			return idx, nil
   606  		}
   607  	}
   608  
   609  	return 0, fmt.Errorf("unable to find htlc: script=%x, value=%v, "+
   610  		"cltv=%v", pkScript, p.Amount, p.Timeout)
   611  }
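
        // The dups map exists because two HTLCs with an identical payment
        // hash, amount, and CLTV are indistinguishable on the commitment
        // transaction. A hedged sketch of how a caller might drive
        // locateOutputIndex over several HTLCs (htlcs, tx, and cltvs are
        // assumed to be prepared by the caller):
        //
        //	dups := make(map[PaymentHash][]int32)
        //	for _, htlc := range htlcs {
        //		idx, err := locateOutputIndex(htlc, tx, true, dups, cltvs)
        //		if err != nil {
        //			return err
        //		}
        //		// idx is guaranteed to differ across duplicate HTLCs.
        //	}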
   612  
    613  // populateHtlcIndexes modifies the set of HTLCs locked into the target view
   614  // to have full indexing information populated. This information is required as
   615  // we need to keep track of the indexes of each HTLC in order to properly write
   616  // the current state to disk, and also to locate the PaymentDescriptor
   617  // corresponding to HTLC outputs in the commitment transaction.
   618  func (c *commitment) populateHtlcIndexes(chanType channeldb.ChannelType,
   619  	cltvs []uint32) error {
   620  
   621  	// First, we'll set up some state to allow us to locate the output
    622  	// index of all the HTLCs within the commitment transaction. We
   623  	// must keep this index so we can validate the HTLC signatures sent to
   624  	// us.
   625  	dups := make(map[PaymentHash][]int32)
   626  	c.outgoingHTLCIndex = make(map[int32]*PaymentDescriptor)
   627  	c.incomingHTLCIndex = make(map[int32]*PaymentDescriptor)
   628  
   629  	// populateIndex is a helper function that populates the necessary
   630  	// indexes within the commitment view for a particular HTLC.
   631  	populateIndex := func(htlc *PaymentDescriptor, incoming bool) error {
   632  		isDust := HtlcIsDust(
   633  			chanType, incoming, c.isOurs, c.feePerKB,
   634  			htlc.Amount.ToAtoms(), c.dustLimit,
   635  		)
   636  
   637  		var err error
   638  		switch {
   639  
   640  		// If this is our commitment transaction, and this is a dust
   641  		// output then we mark it as such using a -1 index.
   642  		case c.isOurs && isDust:
   643  			htlc.localOutputIndex = -1
   644  
   645  		// If this is the commitment transaction of the remote party,
   646  		// and this is a dust output then we mark it as such using a -1
   647  		// index.
   648  		case !c.isOurs && isDust:
   649  			htlc.remoteOutputIndex = -1
   650  
   651  		// If this is our commitment transaction, then we'll need to
    652  		// locate the output and the index so we can verify the HTLC
    653  		// signatures.
   654  		case c.isOurs:
   655  			htlc.localOutputIndex, err = locateOutputIndex(
   656  				htlc, c.txn, c.isOurs, dups, cltvs,
   657  			)
   658  			if err != nil {
   659  				return err
   660  			}
   661  
    662  			// As this is our commitment transaction, we need to
   663  			// keep track of the locations of each output on the
   664  			// transaction so we can verify any HTLC signatures
   665  			// sent to us after we construct the HTLC view.
   666  			if incoming {
   667  				c.incomingHTLCIndex[htlc.localOutputIndex] = htlc
   668  			} else {
   669  				c.outgoingHTLCIndex[htlc.localOutputIndex] = htlc
   670  			}
   671  
    672  		// Otherwise, this is the remote party's commitment
   673  		// transaction and we only need to populate the remote output
   674  		// index within the HTLC index.
   675  		case !c.isOurs:
   676  			htlc.remoteOutputIndex, err = locateOutputIndex(
   677  				htlc, c.txn, c.isOurs, dups, cltvs,
   678  			)
   679  			if err != nil {
   680  				return err
   681  			}
   682  
   683  		default:
   684  			return fmt.Errorf("invalid commitment configuration")
   685  		}
   686  
   687  		return nil
   688  	}
   689  
   690  	// Finally, we'll need to locate the index within the commitment
   691  	// transaction of all the HTLC outputs. This index will be required
   692  	// later when we write the commitment state to disk, and also when
   693  	// generating signatures for each of the HTLC transactions.
   694  	for i := 0; i < len(c.outgoingHTLCs); i++ {
   695  		htlc := &c.outgoingHTLCs[i]
   696  		if err := populateIndex(htlc, false); err != nil {
   697  			return err
   698  		}
   699  	}
   700  	for i := 0; i < len(c.incomingHTLCs); i++ {
   701  		htlc := &c.incomingHTLCs[i]
   702  		if err := populateIndex(htlc, true); err != nil {
   703  			return err
   704  		}
   705  	}
   706  
   707  	return nil
   708  }
   709  
   710  // toDiskCommit converts the target commitment into a format suitable to be
   711  // written to disk after an accepted state transition.
   712  func (c *commitment) toDiskCommit(ourCommit bool) *channeldb.ChannelCommitment {
   713  	numHtlcs := len(c.outgoingHTLCs) + len(c.incomingHTLCs)
   714  
   715  	commit := &channeldb.ChannelCommitment{
   716  		CommitHeight:    c.height,
   717  		LocalLogIndex:   c.ourMessageIndex,
   718  		LocalHtlcIndex:  c.ourHtlcIndex,
   719  		RemoteLogIndex:  c.theirMessageIndex,
   720  		RemoteHtlcIndex: c.theirHtlcIndex,
   721  		LocalBalance:    c.ourBalance,
   722  		RemoteBalance:   c.theirBalance,
   723  		CommitFee:       c.fee,
   724  		FeePerKB:        dcrutil.Amount(c.feePerKB),
   725  		CommitTx:        c.txn,
   726  		CommitSig:       c.sig,
   727  		Htlcs:           make([]channeldb.HTLC, 0, numHtlcs),
   728  	}
   729  
   730  	for _, htlc := range c.outgoingHTLCs {
   731  		outputIndex := htlc.localOutputIndex
   732  		if !ourCommit {
   733  			outputIndex = htlc.remoteOutputIndex
   734  		}
   735  
   736  		h := channeldb.HTLC{
   737  			RHash:         htlc.RHash,
   738  			Amt:           htlc.Amount,
   739  			RefundTimeout: htlc.Timeout,
   740  			OutputIndex:   outputIndex,
   741  			HtlcIndex:     htlc.HtlcIndex,
   742  			LogIndex:      htlc.LogIndex,
   743  			Incoming:      false,
   744  		}
   745  		h.OnionBlob = make([]byte, len(htlc.OnionBlob))
   746  		copy(h.OnionBlob, htlc.OnionBlob)
   747  
   748  		if ourCommit && htlc.sig != nil {
   749  			h.Signature = htlc.sig.Serialize()
   750  		}
   751  
   752  		commit.Htlcs = append(commit.Htlcs, h)
   753  	}
   754  
   755  	for _, htlc := range c.incomingHTLCs {
   756  		outputIndex := htlc.localOutputIndex
   757  		if !ourCommit {
   758  			outputIndex = htlc.remoteOutputIndex
   759  		}
   760  
   761  		h := channeldb.HTLC{
   762  			RHash:         htlc.RHash,
   763  			Amt:           htlc.Amount,
   764  			RefundTimeout: htlc.Timeout,
   765  			OutputIndex:   outputIndex,
   766  			HtlcIndex:     htlc.HtlcIndex,
   767  			LogIndex:      htlc.LogIndex,
   768  			Incoming:      true,
   769  		}
   770  		h.OnionBlob = make([]byte, len(htlc.OnionBlob))
   771  		copy(h.OnionBlob, htlc.OnionBlob)
   772  
   773  		if ourCommit && htlc.sig != nil {
   774  			h.Signature = htlc.sig.Serialize()
   775  		}
   776  
   777  		commit.Htlcs = append(commit.Htlcs, h)
   778  	}
   779  
   780  	return commit
   781  }
   782  
   783  // diskHtlcToPayDesc converts an HTLC previously written to disk within a
   784  // commitment state to the form required to manipulate in memory within the
   785  // commitment struct and updateLog. This function is used when we need to
    786  // restore commitment state written to disk back into memory once we need to
   787  // restart a channel session.
   788  func (lc *LightningChannel) diskHtlcToPayDesc(feeRate chainfee.AtomPerKByte,
   789  	commitHeight uint64, htlc *channeldb.HTLC, localCommitKeys,
   790  	remoteCommitKeys *CommitmentKeyRing, isLocal bool) (PaymentDescriptor,
   791  	error) {
   792  
   793  	// The proper pkScripts for this PaymentDescriptor must be
   794  	// generated so we can easily locate them within the commitment
   795  	// transaction in the future.
   796  	var (
   797  		ourP2SH, theirP2SH                   []byte
   798  		ourWitnessScript, theirWitnessScript []byte
   799  		pd                                   PaymentDescriptor
   800  		err                                  error
   801  		chanType                             = lc.channelState.ChanType
   802  	)
   803  
    804  	// If the output is dust from either the local or remote node's
    805  	// perspective, then we don't need to generate the scripts, as we only
    806  	// generate them in order to locate the outputs within the commitment
    807  	// transaction. Instead, we'll mark dust with a special output index in
    808  	// the on-disk state snapshot.
   809  	isDustLocal := HtlcIsDust(
   810  		chanType, htlc.Incoming, true, feeRate,
   811  		htlc.Amt.ToAtoms(), lc.channelState.LocalChanCfg.DustLimit,
   812  	)
   813  	if !isDustLocal && localCommitKeys != nil {
   814  		ourP2SH, ourWitnessScript, err = genHtlcScript(
   815  			chanType, htlc.Incoming, true, htlc.RefundTimeout,
   816  			htlc.RHash, localCommitKeys)
   817  		if err != nil {
   818  			return pd, err
   819  		}
   820  	}
   821  	isDustRemote := HtlcIsDust(
   822  		chanType, htlc.Incoming, false, feeRate,
   823  		htlc.Amt.ToAtoms(), lc.channelState.RemoteChanCfg.DustLimit,
   824  	)
   825  	if !isDustRemote && remoteCommitKeys != nil {
   826  		theirP2SH, theirWitnessScript, err = genHtlcScript(
   827  			chanType, htlc.Incoming, false, htlc.RefundTimeout,
   828  			htlc.RHash, remoteCommitKeys)
   829  		if err != nil {
   830  			return pd, err
   831  		}
   832  	}
   833  
   834  	// Reconstruct the proper local/remote output indexes from the HTLC's
   835  	// persisted output index depending on whose commitment we are
   836  	// generating.
   837  	var (
   838  		localOutputIndex  int32
   839  		remoteOutputIndex int32
   840  	)
   841  	if isLocal {
   842  		localOutputIndex = htlc.OutputIndex
   843  	} else {
   844  		remoteOutputIndex = htlc.OutputIndex
   845  	}
   846  
   847  	// With the scripts reconstructed (depending on if this is our commit
   848  	// vs theirs or a pending commit for the remote party), we can now
   849  	// re-create the original payment descriptor.
   850  	pd = PaymentDescriptor{
   851  		RHash:              htlc.RHash,
   852  		Timeout:            htlc.RefundTimeout,
   853  		Amount:             htlc.Amt,
   854  		EntryType:          Add,
   855  		HtlcIndex:          htlc.HtlcIndex,
   856  		LogIndex:           htlc.LogIndex,
   857  		OnionBlob:          htlc.OnionBlob,
   858  		localOutputIndex:   localOutputIndex,
   859  		remoteOutputIndex:  remoteOutputIndex,
   860  		ourPkScript:        ourP2SH,
   861  		ourWitnessScript:   ourWitnessScript,
   862  		theirPkScript:      theirP2SH,
   863  		theirWitnessScript: theirWitnessScript,
   864  	}
   865  
   866  	return pd, nil
   867  }
   868  
    869  // extractPayDescs will convert all HTLCs present within a disk commit state
   870  // to a set of incoming and outgoing payment descriptors. Once reconstructed,
   871  // these payment descriptors can be re-inserted into the in-memory updateLog
   872  // for each side.
   873  func (lc *LightningChannel) extractPayDescs(commitHeight uint64,
   874  	feeRate chainfee.AtomPerKByte, htlcs []channeldb.HTLC, localCommitKeys,
   875  	remoteCommitKeys *CommitmentKeyRing, isLocal bool) ([]PaymentDescriptor,
   876  	[]PaymentDescriptor, error) {
   877  
   878  	var (
   879  		incomingHtlcs []PaymentDescriptor
   880  		outgoingHtlcs []PaymentDescriptor
   881  	)
   882  
   883  	// For each included HTLC within this commitment state, we'll convert
   884  	// the disk format into our in memory PaymentDescriptor format,
   885  	// partitioning based on if we offered or received the HTLC.
   886  	for _, htlc := range htlcs {
   887  		// TODO(roasbeef): set isForwarded to false for all? need to
    888  		// persist state w.r.t. whether forwarded or not, or can
   889  		// inadvertently trigger replays
   890  
   891  		payDesc, err := lc.diskHtlcToPayDesc(
   892  			feeRate, commitHeight, &htlc,
   893  			localCommitKeys, remoteCommitKeys,
   894  			isLocal,
   895  		)
   896  		if err != nil {
   897  			return incomingHtlcs, outgoingHtlcs, err
   898  		}
   899  
   900  		if htlc.Incoming {
   901  			incomingHtlcs = append(incomingHtlcs, payDesc)
   902  		} else {
   903  			outgoingHtlcs = append(outgoingHtlcs, payDesc)
   904  		}
   905  	}
   906  
   907  	return incomingHtlcs, outgoingHtlcs, nil
   908  }
   909  
   910  // diskCommitToMemCommit converts the on-disk commitment format to our
   911  // in-memory commitment format which is needed in order to properly resume
   912  // channel operations after a restart.
   913  func (lc *LightningChannel) diskCommitToMemCommit(isLocal bool,
   914  	diskCommit *channeldb.ChannelCommitment, localCommitPoint,
   915  	remoteCommitPoint *secp256k1.PublicKey) (*commitment, error) {
   916  
   917  	// First, we'll need to re-derive the commitment key ring for each
   918  	// party used within this particular state. If this is a pending commit
   919  	// (we extended but weren't able to complete the commitment dance
   920  	// before shutdown), then the localCommitPoint won't be set as we
   921  	// haven't yet received a responding commitment from the remote party.
   922  	var localCommitKeys, remoteCommitKeys *CommitmentKeyRing
   923  	if localCommitPoint != nil {
   924  		localCommitKeys = DeriveCommitmentKeys(
   925  			localCommitPoint, true, lc.channelState.ChanType,
   926  			&lc.channelState.LocalChanCfg,
   927  			&lc.channelState.RemoteChanCfg,
   928  		)
   929  	}
   930  	if remoteCommitPoint != nil {
   931  		remoteCommitKeys = DeriveCommitmentKeys(
   932  			remoteCommitPoint, false, lc.channelState.ChanType,
   933  			&lc.channelState.LocalChanCfg,
   934  			&lc.channelState.RemoteChanCfg,
   935  		)
   936  	}
   937  
   938  	// With the key rings re-created, we'll now convert all the on-disk
    939  	// HTLCs into PaymentDescriptors so we can re-insert them into our
   940  	// update log.
   941  	incomingHtlcs, outgoingHtlcs, err := lc.extractPayDescs(
   942  		diskCommit.CommitHeight,
   943  		chainfee.AtomPerKByte(diskCommit.FeePerKB),
   944  		diskCommit.Htlcs, localCommitKeys, remoteCommitKeys,
   945  		isLocal,
   946  	)
   947  	if err != nil {
   948  		return nil, err
   949  	}
   950  
   951  	// With the necessary items generated, we'll now re-construct the
   952  	// commitment state as it was originally present in memory.
   953  	commit := &commitment{
   954  		height:            diskCommit.CommitHeight,
   955  		isOurs:            isLocal,
   956  		ourBalance:        diskCommit.LocalBalance,
   957  		theirBalance:      diskCommit.RemoteBalance,
   958  		ourMessageIndex:   diskCommit.LocalLogIndex,
   959  		ourHtlcIndex:      diskCommit.LocalHtlcIndex,
   960  		theirMessageIndex: diskCommit.RemoteLogIndex,
   961  		theirHtlcIndex:    diskCommit.RemoteHtlcIndex,
   962  		txn:               diskCommit.CommitTx,
   963  		sig:               diskCommit.CommitSig,
   964  		fee:               diskCommit.CommitFee,
   965  		feePerKB:          chainfee.AtomPerKByte(diskCommit.FeePerKB),
   966  		incomingHTLCs:     incomingHtlcs,
   967  		outgoingHTLCs:     outgoingHtlcs,
   968  	}
   969  	if isLocal {
   970  		commit.dustLimit = lc.channelState.LocalChanCfg.DustLimit
   971  	} else {
   972  		commit.dustLimit = lc.channelState.RemoteChanCfg.DustLimit
   973  	}
   974  
   975  	return commit, nil
   976  }
   977  
   978  // commitmentChain represents a chain of unrevoked commitments. The tail of the
   979  // chain is the latest fully signed, yet unrevoked commitment. Two chains are
   980  // tracked, one for the local node, and another for the remote node. New
   981  // commitments we create locally extend the remote node's chain, and vice
   982  // versa. Commitment chains are allowed to grow to a bounded length, after
   983  // which the tail needs to be "dropped" before new commitments can be received.
   984  // The tail is "dropped" when the owner of the chain sends a revocation for the
   985  // previous tail.
   986  type commitmentChain struct {
   987  	// commitments is a linked list of commitments to new states. New
    988  	// commitments are added to the end of the chain with increasing height.
   989  	// Once a commitment transaction is revoked, the tail is incremented,
   990  	// freeing up the revocation window for new commitments.
   991  	commitments *list.List
   992  }
   993  
   994  // newCommitmentChain creates a new commitment chain.
   995  func newCommitmentChain() *commitmentChain {
   996  	return &commitmentChain{
   997  		commitments: list.New(),
   998  	}
   999  }
  1000  
  1001  // addCommitment extends the commitment chain by a single commitment. This
  1002  // added commitment represents a state update proposed by either party. Once
  1003  // the commitment prior to this commitment is revoked, the commitment becomes
   1004  // the new de facto state within the channel.
  1005  func (s *commitmentChain) addCommitment(c *commitment) {
  1006  	s.commitments.PushBack(c)
  1007  }
  1008  
  1009  // advanceTail reduces the length of the commitment chain by one. The tail of
  1010  // the chain should be advanced once a revocation for the lowest unrevoked
  1011  // commitment in the chain is received.
  1012  func (s *commitmentChain) advanceTail() {
  1013  	s.commitments.Remove(s.commitments.Front())
  1014  }
  1015  
  1016  // tip returns the latest commitment added to the chain.
  1017  func (s *commitmentChain) tip() *commitment {
  1018  	return s.commitments.Back().Value.(*commitment)
  1019  }
  1020  
  1021  // tail returns the lowest unrevoked commitment transaction in the chain.
  1022  func (s *commitmentChain) tail() *commitment {
  1023  	return s.commitments.Front().Value.(*commitment)
  1024  }
  1025  
  1026  // hasUnackedCommitment returns true if the commitment chain has more than one
  1027  // entry. The tail of the commitment chain has been ACKed by revoking all prior
  1028  // commitments, but any subsequent commitments have not yet been ACKed.
  1029  func (s *commitmentChain) hasUnackedCommitment() bool {
  1030  	return s.commitments.Front() != s.commitments.Back()
  1031  }
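
        // An illustrative sketch of the chain lifecycle described above,
        // where c1 and c2 stand in for freshly signed commitments (the real
        // call sites live in LightningChannel):
        //
        //	chain := newCommitmentChain()
        //	chain.addCommitment(c1)          // state 1 signed
        //	chain.addCommitment(c2)          // state 2 signed, c1 unrevoked
        //	_ = chain.hasUnackedCommitment() // true: c2 isn't ACKed yet
        //	chain.advanceTail()              // revocation for c1 received
        //	// tip() and tail() now both return c2.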
  1032  
  1033  // updateLog is an append-only log that stores updates to a node's commitment
  1034  // chain. This structure can be seen as the "mempool" within Lightning where
  1035  // changes are stored before they're committed to the chain. Once an entry has
  1036  // been committed in both the local and remote commitment chain, then it can be
  1037  // removed from this log.
  1038  //
  1039  // TODO(roasbeef): create lightning package, move commitment and update to
  1040  // package?
  1041  //   - also move state machine, separate from lnwallet package
  1042  //   - possible embed updateLog within commitmentChain.
  1043  type updateLog struct {
  1044  	// logIndex is a monotonically increasing integer that tracks the total
  1045  	// number of update entries ever applied to the log. When sending new
  1046  	// commitment states, we include all updates up to this index.
  1047  	logIndex uint64
  1048  
  1049  	// htlcCounter is a monotonically increasing integer that tracks the
   1050  	// total number of HTLCs offered by the owner of this update log. We
   1051  	// use a distinct index for this purpose, as updates that remove
   1052  	// entries from the log will be indexed using this counter.
  1053  	htlcCounter uint64
  1054  
   1055  	// List is the update log itself. We embed this value so updateLog has
   1056  	// access to all the methods of a list.List.
  1057  	*list.List
  1058  
   1059  	// updateIndex is an index that maps a particular entry's index to the
  1060  	// list element within the list.List above.
  1061  	updateIndex map[uint64]*list.Element
  1062  
   1063  	// htlcIndex is an index that maps the counter for offered HTLCs to
  1064  	// their list element within the main list.List.
  1065  	htlcIndex map[uint64]*list.Element
  1066  
  1067  	// modifiedHtlcs is a set that keeps track of all the current modified
   1068  	// htlcs. A modified HTLC is one that's present in the log and has a
   1069  	// pending fail or settle that's attempting to consume it.
  1070  	modifiedHtlcs map[uint64]struct{}
  1071  }
  1072  
  1073  // newUpdateLog creates a new updateLog instance.
  1074  func newUpdateLog(logIndex, htlcCounter uint64) *updateLog {
  1075  	return &updateLog{
  1076  		List:          list.New(),
  1077  		updateIndex:   make(map[uint64]*list.Element),
  1078  		htlcIndex:     make(map[uint64]*list.Element),
  1079  		logIndex:      logIndex,
  1080  		htlcCounter:   htlcCounter,
  1081  		modifiedHtlcs: make(map[uint64]struct{}),
  1082  	}
  1083  }
  1084  
  1085  // restoreHtlc will "restore" a prior HTLC to the updateLog. We say restore as
   1086  // this method is intended to be used when recovering a prior commitment
   1087  // state. This function differs from appendHtlc in that it won't increment
   1088  // either of the log's counters. If the HTLC is already present, then it is
  1089  // ignored.
  1090  func (u *updateLog) restoreHtlc(pd *PaymentDescriptor) {
  1091  	if _, ok := u.htlcIndex[pd.HtlcIndex]; ok {
  1092  		return
  1093  	}
  1094  
  1095  	u.htlcIndex[pd.HtlcIndex] = u.PushBack(pd)
  1096  }
  1097  
  1098  // appendUpdate appends a new update to the tip of the updateLog. The entry is
   1099  // also added to the update index accordingly.
  1100  func (u *updateLog) appendUpdate(pd *PaymentDescriptor) {
  1101  	u.updateIndex[u.logIndex] = u.PushBack(pd)
  1102  	u.logIndex++
  1103  }
  1104  
  1105  // restoreUpdate appends a new update to the tip of the updateLog. The entry is
   1106  // also added to the update index accordingly. This function differs from
   1107  // appendUpdate in that it won't increment the log index counter.
  1108  func (u *updateLog) restoreUpdate(pd *PaymentDescriptor) {
  1109  	u.updateIndex[pd.LogIndex] = u.PushBack(pd)
  1110  }
  1111  
  1112  // appendHtlc appends a new HTLC offer to the tip of the update log. The entry
  1113  // is also added to the offer index accordingly.
  1114  func (u *updateLog) appendHtlc(pd *PaymentDescriptor) {
  1115  	u.htlcIndex[u.htlcCounter] = u.PushBack(pd)
  1116  	u.htlcCounter++
  1117  
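        	// An offered HTLC also occupies a slot in the shared update
        	// log, so the log index advances alongside the HTLC counter.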
  1118  	u.logIndex++
  1119  }
  1120  
  1121  // lookupHtlc attempts to look up an offered HTLC according to its offer
  1122  // index. If the entry isn't found, then a nil pointer is returned.
  1123  func (u *updateLog) lookupHtlc(i uint64) *PaymentDescriptor {
  1124  	htlc, ok := u.htlcIndex[i]
  1125  	if !ok {
  1126  		return nil
  1127  	}
  1128  
  1129  	return htlc.Value.(*PaymentDescriptor)
  1130  }
  1131  
   1132  // removeUpdate attempts to remove an entry from the update log. If the entry
   1133  // is found, then the entry will be removed from the update log and index.
  1134  func (u *updateLog) removeUpdate(i uint64) {
  1135  	entry := u.updateIndex[i]
  1136  	u.Remove(entry)
  1137  	delete(u.updateIndex, i)
  1138  }
  1139  
   1140  // removeHtlc attempts to remove an HTLC offer from the update log. If the
  1141  // entry is found, then the entry will be removed from both the main log and
  1142  // the offer index.
  1143  func (u *updateLog) removeHtlc(i uint64) {
  1144  	entry := u.htlcIndex[i]
  1145  	u.Remove(entry)
  1146  	delete(u.htlcIndex, i)
  1147  
  1148  	delete(u.modifiedHtlcs, i)
  1149  }
  1150  
  1151  // htlcHasModification returns true if the HTLC identified by the passed index
  1152  // has a pending modification within the log.
  1153  func (u *updateLog) htlcHasModification(i uint64) bool {
  1154  	_, o := u.modifiedHtlcs[i]
  1155  	return o
  1156  }
  1157  
  1158  // markHtlcModified marks an HTLC as modified based on its HTLC index. After a
  1159  // call to this method, htlcHasModification will return true until the HTLC is
  1160  // removed.
  1161  func (u *updateLog) markHtlcModified(i uint64) {
  1162  	u.modifiedHtlcs[i] = struct{}{}
  1163  }
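
        // A hedged sketch of the log's dual indexing, where add and settle
        // are assumed to be PaymentDescriptors prepared by the caller with
        // HtlcIndex/LogIndex matching the counters consumed below:
        //
        //	log := newUpdateLog(0, 0)
        //	log.appendHtlc(add)      // consumes htlc index 0, log index 0
        //	log.appendUpdate(settle) // consumes log index 1
        //	log.markHtlcModified(0)
        //	_ = log.htlcHasModification(0) // true until removeHtlc(0)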
  1164  
  1165  // compactLogs performs garbage collection within the log removing HTLCs which
  1166  // have been removed from the point-of-view of the tail of both chains. The
  1167  // entries which timeout/settle HTLCs are also removed.
  1168  func compactLogs(ourLog, theirLog *updateLog,
  1169  	localChainTail, remoteChainTail uint64) {
  1170  
  1171  	compactLog := func(logA, logB *updateLog) {
  1172  		var nextA *list.Element
  1173  		for e := logA.Front(); e != nil; e = nextA {
  1174  			// Assign next iteration element at top of loop because
  1175  			// we may remove the current element from the list,
  1176  			// which can change the iterated sequence.
  1177  			nextA = e.Next()
  1178  
  1179  			htlc := e.Value.(*PaymentDescriptor)
  1180  
  1181  			// We skip Adds, as they will be removed along with the
  1182  			// fail/settles below.
  1183  			if htlc.EntryType == Add {
  1184  				continue
  1185  			}
  1186  
  1187  			// If the HTLC hasn't yet been removed from either
   1188  			// chain, then skip it.
  1189  			if htlc.removeCommitHeightRemote == 0 ||
  1190  				htlc.removeCommitHeightLocal == 0 {
  1191  				continue
  1192  			}
  1193  
  1194  			// Otherwise if the height of the tail of both chains
  1195  			// is at least the height in which the HTLC was
  1196  			// removed, then evict the settle/timeout entry along
  1197  			// with the original add entry.
  1198  			if remoteChainTail >= htlc.removeCommitHeightRemote &&
  1199  				localChainTail >= htlc.removeCommitHeightLocal {
  1200  
  1201  				// Fee updates have no parent htlcs, so we only
  1202  				// remove the update itself.
  1203  				if htlc.EntryType == FeeUpdate {
  1204  					logA.removeUpdate(htlc.LogIndex)
  1205  					continue
  1206  				}
  1207  
  1208  				// The other types (fail/settle) do have a
  1209  				// parent HTLC, so we'll remove that HTLC from
  1210  				// the other log.
  1211  				logA.removeUpdate(htlc.LogIndex)
  1212  				logB.removeHtlc(htlc.ParentIndex)
  1213  			}
  1214  
  1215  		}
  1216  	}
  1217  
  1218  	compactLog(ourLog, theirLog)
  1219  	compactLog(theirLog, ourLog)
  1220  }
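
        // As a concrete example of the above: if the remote party settled one
        // of our offered HTLCs, the Settle entry lives in their log while the
        // original Add lives in ours. Once both chain tails have advanced past
        // the heights at which the settle was committed, the
        // compactLog(theirLog, ourLog) pass removes the Settle entry from
        // their log and the parent Add from ours.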
  1221  
  1222  // LightningChannel implements the state machine which corresponds to the
  1223  // current commitment protocol wire spec. The state machine implemented allows
   1224  // for asynchronous, fully desynchronized, batched+pipelined updates to
  1225  // commitment transactions allowing for a high degree of non-blocking
  1226  // bi-directional payment throughput.
  1227  //
  1228  // In order to allow updates to be fully non-blocking, either side is able to
  1229  // create multiple new commitment states up to a pre-determined window size.
  1230  // This window size is encoded within InitialRevocationWindow. Before the start
   1231  // of a session, both sides should send out revocation messages with nil
  1232  // preimages in order to populate their revocation window for the remote party.
  1233  //
   1234  // The state machine has four main methods:
   1235  //   - .SignNextCommitment()
   1236  //   - Called when one wishes to sign the next commitment, either initiating a
  1237  //     new state update, or responding to a received commitment.
  1238  //   - .ReceiveNewCommitment()
  1239  //   - Called upon receipt of a new commitment from the remote party. If the
  1240  //     new commitment is valid, then a revocation should immediately be
  1241  //     generated and sent.
  1242  //   - .RevokeCurrentCommitment()
  1243  //   - Revokes the current commitment. Should be called directly after
  1244  //     receiving a new commitment.
  1245  //   - .ReceiveRevocation()
   1246  //   - Processes a revocation from the remote party. If successful, creates a
   1247  //     new de facto broadcastable state.
  1248  //
  1249  // See the individual comments within the above methods for further details.
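        //
        // A hedged sketch of a single update round between peers A and B
        // (return values elided; see the method declarations for the exact
        // signatures):
        //
        //	// A signs a new state, extending B's remote chain.
        //	sig, htlcSigs, ... := chanA.SignNextCommitment()
        //	// B validates the commitment, then revokes its prior state.
        //	err := chanB.ReceiveNewCommitment(sig, htlcSigs)
        //	rev, ... := chanB.RevokeCurrentCommitment()
        //	// A applies the revocation, making the new state broadcastable.
        //	chanA.ReceiveRevocation(rev)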
  1250  type LightningChannel struct {
   1251  	// Signer is the main signer instance that will be responsible for
  1252  	// signing any HTLC and commitment transaction generated by the state
  1253  	// machine.
  1254  	Signer input.Signer
  1255  
  1256  	// signDesc is the primary sign descriptor that is capable of signing
  1257  	// the commitment transaction that spends the multi-sig output.
  1258  	signDesc *input.SignDescriptor
  1259  
  1260  	status channelState
  1261  
  1262  	// ChanPoint is the funding outpoint of this channel.
  1263  	ChanPoint *wire.OutPoint
  1264  
  1265  	// sigPool is a pool of workers that are capable of signing and
  1266  	// validating signatures in parallel. This is utilized as an
   1267  	// optimization to avoid serially signing or validating the HTLC
  1268  	// signatures, of which there may be hundreds.
  1269  	sigPool *SigPool
  1270  
  1271  	// Capacity is the total capacity of this channel.
  1272  	Capacity dcrutil.Amount
  1273  
  1274  	// currentHeight is the current height of our local commitment chain.
  1275  	// This is also the same as the number of updates to the channel we've
  1276  	// accepted.
  1277  	currentHeight uint64
  1278  
  1279  	// remoteCommitChain is the remote node's commitment chain. Any new
  1280  	// commitments we initiate are added to the tip of this chain.
  1281  	remoteCommitChain *commitmentChain
  1282  
  1283  	// localCommitChain is our local commitment chain. Any new commitments
  1284  	// received are added to the tip of this chain. The tail (or lowest
  1285  	// height) in this chain is our current accepted state, which we are
  1286  	// able to broadcast safely.
  1287  	localCommitChain *commitmentChain
  1288  
  1289  	channelState *channeldb.OpenChannel
  1290  
  1291  	commitBuilder *CommitmentBuilder
  1292  
   1293  	// [local|remote]UpdateLog is a (mostly) append-only log storing all the
   1294  	// HTLC updates to this channel. The log is walked backwards as HTLC
   1295  	// updates are applied in order to re-construct a commitment transaction
   1296  	// from a commitment. The log is compacted once a revocation is received.
  1297  	localUpdateLog  *updateLog
  1298  	remoteUpdateLog *updateLog
  1299  
  1300  	// LocalFundingKey is the public key under control by the wallet that
  1301  	// was used for the 2-of-2 funding output which created this channel.
  1302  	LocalFundingKey *secp256k1.PublicKey
  1303  
  1304  	// RemoteFundingKey is the public key for the remote channel
  1305  	// counterparty which was used for the 2-of-2 funding output which
  1306  	// created this channel.
  1307  	RemoteFundingKey *secp256k1.PublicKey
  1308  
  1309  	netParams *chaincfg.Params
  1310  
  1311  	// log is a channel-specific logging instance.
  1312  	log slog.Logger
  1313  
  1314  	sync.RWMutex
  1315  }
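
        // As a hedged illustration of the protocol described above (not a
        // compilable excerpt; the exact signatures and return values are
        // defined on the methods later in this file), one full commitment
        // update round between an initiator A and a responder B proceeds as:
        //
        //	A: SignNextCommitment()      -> send CommitSig to B
        //	B: ReceiveNewCommitment(..)  -- validate A's signatures
        //	B: RevokeCurrentCommitment() -> send RevokeAndAck to A
        //	A: ReceiveRevocation(..)     -- B's prior state is now revoked
        //
        // B may concurrently run the same sequence in the other direction,
        // which is what yields the desynchronized, pipelined updates noted
        // above.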
  1316  
  1317  // NewLightningChannel creates a new, active payment channel given an
  1318  // implementation of the chain notifier, channel database, and the current
  1319  // settled channel state. Throughout state transitions, the channel will
  1320  // automatically persist pertinent state to the database in an efficient
  1321  // manner.
  1322  func NewLightningChannel(signer input.Signer,
  1323  	state *channeldb.OpenChannel,
  1324  	sigPool *SigPool,
  1325  	netParams *chaincfg.Params) (*LightningChannel, error) {
  1326  
  1327  	localCommit := state.LocalCommitment
  1328  	remoteCommit := state.RemoteCommitment
  1329  
  1330  	// First, initialize the update logs with their current counter values
  1331  	// from the local and remote commitments.
  1332  	localUpdateLog := newUpdateLog(
  1333  		remoteCommit.LocalLogIndex, remoteCommit.LocalHtlcIndex,
  1334  	)
  1335  	remoteUpdateLog := newUpdateLog(
  1336  		localCommit.RemoteLogIndex, localCommit.RemoteHtlcIndex,
  1337  	)
  1338  
  1339  	logPrefix := fmt.Sprintf("ChannelPoint(%v):", state.FundingOutpoint)
  1340  
  1341  	lc := &LightningChannel{
  1342  		Signer:            signer,
  1343  		sigPool:           sigPool,
  1344  		currentHeight:     localCommit.CommitHeight,
  1345  		remoteCommitChain: newCommitmentChain(),
  1346  		localCommitChain:  newCommitmentChain(),
  1347  		channelState:      state,
  1348  		commitBuilder:     NewCommitmentBuilder(state, netParams),
  1349  		localUpdateLog:    localUpdateLog,
  1350  		remoteUpdateLog:   remoteUpdateLog,
  1351  		ChanPoint:         &state.FundingOutpoint,
  1352  		Capacity:          state.Capacity,
  1353  		LocalFundingKey:   state.LocalChanCfg.MultiSigKey.PubKey,
  1354  		RemoteFundingKey:  state.RemoteChanCfg.MultiSigKey.PubKey,
  1355  		netParams:         netParams,
  1356  		log:               build.NewPrefixLog(logPrefix, walletLog),
  1357  	}
  1358  
  1359  	// With the main channel struct reconstructed, we'll now restore the
  1360  	// commitment state in memory and also the update logs themselves.
  1361  	err := lc.restoreCommitState(&localCommit, &remoteCommit)
  1362  	if err != nil {
  1363  		return nil, err
  1364  	}
  1365  
  1366  	// Create the sign descriptor which we'll be using very frequently to
  1367  	// request a signature for the 2-of-2 multi-sig from the signer in
  1368  	// order to complete channel state transitions.
  1369  	if err := lc.createSignDesc(); err != nil {
  1370  		return nil, err
  1371  	}
  1372  
  1373  	return lc, nil
  1374  }
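
        // A minimal usage sketch for NewLightningChannel (hedged; error
        // handling is elided, and signer, dbChan, and sigPool are assumed to
        // come from the caller's environment):
        //
        //	lc, err := NewLightningChannel(signer, dbChan, sigPool, netParams)
        //	if err != nil {
        //		// handle error
        //	}
        //	lc.ResetState()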
  1375  
  1376  // createSignDesc derives the input.SignDescriptor for commitment transactions from
  1377  // other fields on the LightningChannel.
  1378  func (lc *LightningChannel) createSignDesc() error {
  1379  	localKey := lc.channelState.LocalChanCfg.MultiSigKey.PubKey.
  1380  		SerializeCompressed()
  1381  	remoteKey := lc.channelState.RemoteChanCfg.MultiSigKey.PubKey.
  1382  		SerializeCompressed()
  1383  
  1384  	multiSigScript, err := input.GenMultiSigScript(localKey, remoteKey)
  1385  	if err != nil {
  1386  		return err
  1387  	}
  1388  
  1389  	fundingPkScript, err := input.ScriptHashPkScript(multiSigScript)
  1390  	if err != nil {
  1391  		return err
  1392  	}
  1393  	lc.signDesc = &input.SignDescriptor{
  1394  		KeyDesc:       lc.channelState.LocalChanCfg.MultiSigKey,
  1395  		WitnessScript: multiSigScript,
  1396  		Output: &wire.TxOut{
  1397  			PkScript: fundingPkScript,
  1398  			Value:    int64(lc.channelState.Capacity),
  1399  		},
  1400  		HashType:   txscript.SigHashAll,
  1401  		InputIndex: 0,
  1402  	}
  1403  
  1404  	return nil
  1405  }
  1406  
  1407  // ResetState resets the state of the channel back to the default state. This
  1408  // ensures that any active goroutines which need to act based on on-chain
  1409  // events do so properly.
  1410  func (lc *LightningChannel) ResetState() {
  1411  	lc.Lock()
  1412  	lc.status = channelOpen
  1413  	lc.Unlock()
  1414  }
  1415  
  1416  // logUpdateToPayDesc converts a LogUpdate into a matching PaymentDescriptor
  1417  // entry that can be re-inserted into the update log. This method is used when
  1418  // we extended a state to the remote party, but the connection was obstructed
  1419  // before we could finish the commitment dance. In this case, we need to
  1420  // re-insert the original entries back into the update log so we can resume as
  1421  // if nothing happened.
  1422  func (lc *LightningChannel) logUpdateToPayDesc(logUpdate *channeldb.LogUpdate,
  1423  	remoteUpdateLog *updateLog, commitHeight uint64,
  1424  	feeRate chainfee.AtomPerKByte, remoteCommitKeys *CommitmentKeyRing,
  1425  	remoteDustLimit dcrutil.Amount) (*PaymentDescriptor, error) {
  1426  
  1427  	// Depending on the type of update message we'll map that to a distinct
  1428  	// PaymentDescriptor instance.
  1429  	var pd *PaymentDescriptor
  1430  
  1431  	switch wireMsg := logUpdate.UpdateMsg.(type) {
  1432  
  1433  	// For offered HTLC's, we'll map that to a PaymentDescriptor with the
  1434  	// type Add, ensuring we restore the necessary fields. From the PoV of
  1435  	// the commitment chain, this HTLC was included in the remote chain,
  1436  	// but not the local chain.
  1437  	case *lnwire.UpdateAddHTLC:
  1438  		// First, we'll map all the relevant fields in the
  1439  		// UpdateAddHTLC message to their corresponding fields in the
  1440  		// PaymentDescriptor struct. We also set addCommitHeightRemote
  1441  		// as we've included this HTLC in our local commitment chain
  1442  		// for the remote party.
  1443  		pd = &PaymentDescriptor{
  1444  			RHash:                 wireMsg.PaymentHash,
  1445  			Timeout:               wireMsg.Expiry,
  1446  			Amount:                wireMsg.Amount,
  1447  			EntryType:             Add,
  1448  			HtlcIndex:             wireMsg.ID,
  1449  			LogIndex:              logUpdate.LogIndex,
  1450  			addCommitHeightRemote: commitHeight,
  1451  		}
  1452  		pd.OnionBlob = make([]byte, len(wireMsg.OnionBlob))
  1453  		copy(pd.OnionBlob, wireMsg.OnionBlob[:])
  1454  
  1455  		isDustRemote := HtlcIsDust(
  1456  			lc.channelState.ChanType, false, false, feeRate,
  1457  			wireMsg.Amount.ToAtoms(), remoteDustLimit,
  1458  		)
  1459  		if !isDustRemote {
  1460  			theirP2SH, theirWitnessScript, err := genHtlcScript(
  1461  				lc.channelState.ChanType, false, false,
  1462  				wireMsg.Expiry, wireMsg.PaymentHash,
  1463  				remoteCommitKeys,
  1464  			)
  1465  			if err != nil {
  1466  				return nil, err
  1467  			}
  1468  
  1469  			pd.theirPkScript = theirP2SH
  1470  			pd.theirWitnessScript = theirWitnessScript
  1471  		}
  1472  
  1473  	// For HTLCs that we've settled, we'll fetch the original offered HTLC
  1474  	// from the remote party's update log so we can retrieve the same
  1475  	// PaymentDescriptor that SettleHTLC would produce.
  1476  	case *lnwire.UpdateFulfillHTLC:
  1477  		ogHTLC := remoteUpdateLog.lookupHtlc(wireMsg.ID)
  1478  
  1479  		pd = &PaymentDescriptor{
  1480  			Amount:                   ogHTLC.Amount,
  1481  			RHash:                    ogHTLC.RHash,
  1482  			RPreimage:                wireMsg.PaymentPreimage,
  1483  			LogIndex:                 logUpdate.LogIndex,
  1484  			ParentIndex:              ogHTLC.HtlcIndex,
  1485  			EntryType:                Settle,
  1486  			removeCommitHeightRemote: commitHeight,
  1487  		}
  1488  
  1489  	// If we sent a failure for a prior incoming HTLC, then we'll consult
  1490  	// the update log of the remote party so we can retrieve the
  1491  	// information of the original HTLC we're failing. We also set the
  1492  	// removal height for the remote commitment.
  1493  	case *lnwire.UpdateFailHTLC:
  1494  		ogHTLC := remoteUpdateLog.lookupHtlc(wireMsg.ID)
  1495  
  1496  		pd = &PaymentDescriptor{
  1497  			Amount:                   ogHTLC.Amount,
  1498  			RHash:                    ogHTLC.RHash,
  1499  			ParentIndex:              ogHTLC.HtlcIndex,
  1500  			LogIndex:                 logUpdate.LogIndex,
  1501  			EntryType:                Fail,
  1502  			FailReason:               wireMsg.Reason[:],
  1503  			removeCommitHeightRemote: commitHeight,
  1504  		}
  1505  
  1506  	// HTLC failures due to malformed onion blobs are treated exactly the
  1507  	// same way as regular HTLC failures.
  1508  	case *lnwire.UpdateFailMalformedHTLC:
  1509  		ogHTLC := remoteUpdateLog.lookupHtlc(wireMsg.ID)
  1510  		// TODO(roasbeef): err if nil?
  1511  
  1512  		pd = &PaymentDescriptor{
  1513  			Amount:                   ogHTLC.Amount,
  1514  			RHash:                    ogHTLC.RHash,
  1515  			ParentIndex:              ogHTLC.HtlcIndex,
  1516  			LogIndex:                 logUpdate.LogIndex,
  1517  			EntryType:                MalformedFail,
  1518  			FailCode:                 wireMsg.FailureCode,
  1519  			ShaOnionBlob:             wireMsg.ShaOnionBlob,
  1520  			removeCommitHeightRemote: commitHeight,
  1521  		}
  1522  
  1523  	// For fee updates we'll create a FeeUpdate type to add to the log. We
  1524  	// reuse the amount field to hold the fee rate. Since the amount field
  1525  	// is denominated in milliatoms we won't lose precision when storing
  1526  	// the atoms/kB denominated fee rate. Note that we set both the add
  1527  	// and remove height to the same value, as we consider the fee update
  1528  	// locked in by adding and removing it at the same height.
  1529  	case *lnwire.UpdateFee:
  1530  		pd = &PaymentDescriptor{
  1531  			LogIndex: logUpdate.LogIndex,
  1532  			Amount: lnwire.NewMAtomsFromAtoms(
  1533  				dcrutil.Amount(wireMsg.FeePerKB),
  1534  			),
  1535  			EntryType:                FeeUpdate,
  1536  			addCommitHeightRemote:    commitHeight,
  1537  			removeCommitHeightRemote: commitHeight,
  1538  		}
  1539  	}
  1540  
  1541  	return pd, nil
  1542  }
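
        // To summarize, the wire-message-to-log-entry mapping performed by
        // logUpdateToPayDesc above is:
        //
        //	lnwire.UpdateAddHTLC           -> EntryType Add
        //	lnwire.UpdateFulfillHTLC       -> EntryType Settle
        //	lnwire.UpdateFailHTLC          -> EntryType Fail
        //	lnwire.UpdateFailMalformedHTLC -> EntryType MalformedFail
        //	lnwire.UpdateFee               -> EntryType FeeUpdate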
  1543  
  1544  // localLogUpdateToPayDesc converts a LogUpdate into a matching PaymentDescriptor
  1545  // entry that can be re-inserted into the local update log. This method is used
  1546  // when we sent an update+sig, received a revocation, but dropped right before
  1547  // the counterparty could sign for the update we just sent. In this case, we
  1548  // need to re-insert the original entries back into the update log so we'll be
  1549  // expecting the peer to sign them. The height of the remote commitment is
  1550  // expected to be provided and we restore all log update entries with this
  1551  // height, even though the real height may be lower. Given the way these fields
  1552  // are used elsewhere, this doesn't change anything.
  1553  func (lc *LightningChannel) localLogUpdateToPayDesc(logUpdate *channeldb.LogUpdate,
  1554  	remoteUpdateLog *updateLog, commitHeight uint64) (*PaymentDescriptor,
  1555  	error) {
  1556  
  1557  	// Since Add updates aren't saved to disk under this key, the update will
  1558  	// never be an Add.
  1559  	switch wireMsg := logUpdate.UpdateMsg.(type) {
  1560  
  1561  	// For HTLCs that we settled, we'll fetch the original offered HTLC from
  1562  	// the remote update log so we can retrieve the same PaymentDescriptor that
  1563  	// ReceiveHTLCSettle would produce.
  1564  	case *lnwire.UpdateFulfillHTLC:
  1565  		ogHTLC := remoteUpdateLog.lookupHtlc(wireMsg.ID)
  1566  
  1567  		return &PaymentDescriptor{
  1568  			Amount:                   ogHTLC.Amount,
  1569  			RHash:                    ogHTLC.RHash,
  1570  			RPreimage:                wireMsg.PaymentPreimage,
  1571  			LogIndex:                 logUpdate.LogIndex,
  1572  			ParentIndex:              ogHTLC.HtlcIndex,
  1573  			EntryType:                Settle,
  1574  			removeCommitHeightRemote: commitHeight,
  1575  		}, nil
  1576  
  1577  	// If we sent a failure for a prior incoming HTLC, then we'll consult the
  1578  	// remote update log so we can retrieve the information of the original
  1579  	// HTLC we're failing.
  1580  	case *lnwire.UpdateFailHTLC:
  1581  		ogHTLC := remoteUpdateLog.lookupHtlc(wireMsg.ID)
  1582  
  1583  		return &PaymentDescriptor{
  1584  			Amount:                   ogHTLC.Amount,
  1585  			RHash:                    ogHTLC.RHash,
  1586  			ParentIndex:              ogHTLC.HtlcIndex,
  1587  			LogIndex:                 logUpdate.LogIndex,
  1588  			EntryType:                Fail,
  1589  			FailReason:               wireMsg.Reason[:],
  1590  			removeCommitHeightRemote: commitHeight,
  1591  		}, nil
  1592  
  1593  	// HTLC failures due to malformed onion blobs are treated exactly the
  1594  	// same way as regular HTLC failures.
  1595  	case *lnwire.UpdateFailMalformedHTLC:
  1596  		ogHTLC := remoteUpdateLog.lookupHtlc(wireMsg.ID)
  1597  
  1598  		return &PaymentDescriptor{
  1599  			Amount:                   ogHTLC.Amount,
  1600  			RHash:                    ogHTLC.RHash,
  1601  			ParentIndex:              ogHTLC.HtlcIndex,
  1602  			LogIndex:                 logUpdate.LogIndex,
  1603  			EntryType:                MalformedFail,
  1604  			FailCode:                 wireMsg.FailureCode,
  1605  			ShaOnionBlob:             wireMsg.ShaOnionBlob,
  1606  			removeCommitHeightRemote: commitHeight,
  1607  		}, nil
  1608  
  1609  	case *lnwire.UpdateFee:
  1610  		return &PaymentDescriptor{
  1611  			LogIndex: logUpdate.LogIndex,
  1612  			Amount: lnwire.NewMAtomsFromAtoms(
  1613  				dcrutil.Amount(wireMsg.FeePerKB),
  1614  			),
  1615  			EntryType:                FeeUpdate,
  1616  			addCommitHeightRemote:    commitHeight,
  1617  			removeCommitHeightRemote: commitHeight,
  1618  		}, nil
  1619  
  1620  	default:
  1621  		return nil, fmt.Errorf("unknown message type: %T", wireMsg)
  1622  	}
  1623  }
  1624  
  1625  // remoteLogUpdateToPayDesc converts a LogUpdate into a matching
  1626  // PaymentDescriptor entry that can be re-inserted into the update log. This
  1627  // method is used when we revoked a local commitment, but the connection was
  1628  // obstructed before we could sign a remote commitment that contains these
  1629  // updates. In this case, we need to re-insert the original entries back into
  1630  // the update log so we can resume as if nothing happened. The height of the
  1631  // latest local commitment is also expected to be provided. We are restoring all
  1632  // log update entries with this height, even though the real commitment height
  1633  // may be lower. Given the way these fields are used elsewhere, this doesn't
  1634  // change anything.
  1635  func (lc *LightningChannel) remoteLogUpdateToPayDesc(logUpdate *channeldb.LogUpdate,
  1636  	localUpdateLog *updateLog, commitHeight uint64) (*PaymentDescriptor,
  1637  	error) {
  1638  
  1639  	switch wireMsg := logUpdate.UpdateMsg.(type) {
  1640  
  1641  	case *lnwire.UpdateAddHTLC:
  1642  		pd := &PaymentDescriptor{
  1643  			RHash:                wireMsg.PaymentHash,
  1644  			Timeout:              wireMsg.Expiry,
  1645  			Amount:               wireMsg.Amount,
  1646  			EntryType:            Add,
  1647  			HtlcIndex:            wireMsg.ID,
  1648  			LogIndex:             logUpdate.LogIndex,
  1649  			addCommitHeightLocal: commitHeight,
  1650  		}
  1651  		pd.OnionBlob = make([]byte, len(wireMsg.OnionBlob))
  1652  		copy(pd.OnionBlob, wireMsg.OnionBlob[:])
  1653  
  1654  		// We don't need to generate an htlc script yet. This will be
  1655  		// done once we sign our remote commitment.
  1656  
  1657  		return pd, nil
  1658  
  1659  	// For HTLCs that the remote party settled, we'll fetch the original
  1660  	// offered HTLC from the local update log so we can retrieve the same
  1661  	// PaymentDescriptor that ReceiveHTLCSettle would produce.
  1662  	case *lnwire.UpdateFulfillHTLC:
  1663  		ogHTLC := localUpdateLog.lookupHtlc(wireMsg.ID)
  1664  
  1665  		return &PaymentDescriptor{
  1666  			Amount:                  ogHTLC.Amount,
  1667  			RHash:                   ogHTLC.RHash,
  1668  			RPreimage:               wireMsg.PaymentPreimage,
  1669  			LogIndex:                logUpdate.LogIndex,
  1670  			ParentIndex:             ogHTLC.HtlcIndex,
  1671  			EntryType:               Settle,
  1672  			removeCommitHeightLocal: commitHeight,
  1673  		}, nil
  1674  
  1675  	// If we received a failure for a prior outgoing HTLC, then we'll
  1676  	// consult the local update log so we can retrieve the information of
  1677  	// the original HTLC we're failing.
  1678  	case *lnwire.UpdateFailHTLC:
  1679  		ogHTLC := localUpdateLog.lookupHtlc(wireMsg.ID)
  1680  
  1681  		return &PaymentDescriptor{
  1682  			Amount:                  ogHTLC.Amount,
  1683  			RHash:                   ogHTLC.RHash,
  1684  			ParentIndex:             ogHTLC.HtlcIndex,
  1685  			LogIndex:                logUpdate.LogIndex,
  1686  			EntryType:               Fail,
  1687  			FailReason:              wireMsg.Reason[:],
  1688  			removeCommitHeightLocal: commitHeight,
  1689  		}, nil
  1690  
  1691  	// HTLC failures due to malformed onion blobs are treated exactly the
  1692  	// same way as regular HTLC failures.
  1693  	case *lnwire.UpdateFailMalformedHTLC:
  1694  		ogHTLC := localUpdateLog.lookupHtlc(wireMsg.ID)
  1695  
  1696  		return &PaymentDescriptor{
  1697  			Amount:                  ogHTLC.Amount,
  1698  			RHash:                   ogHTLC.RHash,
  1699  			ParentIndex:             ogHTLC.HtlcIndex,
  1700  			LogIndex:                logUpdate.LogIndex,
  1701  			EntryType:               MalformedFail,
  1702  			FailCode:                wireMsg.FailureCode,
  1703  			ShaOnionBlob:            wireMsg.ShaOnionBlob,
  1704  			removeCommitHeightLocal: commitHeight,
  1705  		}, nil
  1706  
  1707  	// For fee updates we'll create a FeeUpdate type to add to the log. We
  1708  	// reuse the amount field to hold the fee rate. Since the amount field
  1709  	// is denominated in milliatoms we won't lose precision when storing
  1710  	// the atoms/kB denominated fee rate. Note that we set both the add
  1711  	// and remove height to the same value, as we consider the fee update
  1712  	// locked in by adding and removing it at the same height.
  1713  	case *lnwire.UpdateFee:
  1714  		return &PaymentDescriptor{
  1715  			LogIndex: logUpdate.LogIndex,
  1716  			Amount: lnwire.NewMAtomsFromAtoms(
  1717  				dcrutil.Amount(wireMsg.FeePerKB),
  1718  			),
  1719  			EntryType:               FeeUpdate,
  1720  			addCommitHeightLocal:    commitHeight,
  1721  			removeCommitHeightLocal: commitHeight,
  1722  		}, nil
  1723  
  1724  	default:
  1725  		return nil, fmt.Errorf("unknown message type: %T", wireMsg)
  1726  	}
  1727  }
  1728  
  1729  // restoreCommitState will restore the local commitment chain and updateLog
  1730  // state to a consistent in-memory representation of the passed disk commitment.
  1731  // This method is to be used upon reconnection to our channel counterparty.
  1732  // Once the connection has been established, we'll prepare our in-memory state
  1733  // to re-sync states with the remote party, and also verify/extend new proposed
  1734  // commitment states.
  1735  func (lc *LightningChannel) restoreCommitState(
  1736  	localCommitState, remoteCommitState *channeldb.ChannelCommitment) error {
  1737  
  1738  	// In order to reconstruct the pkScripts on each of the pending HTLC
  1739  	// outputs (if any) we'll need to regenerate the current revocation for
  1740  	// this current un-revoked state as well as retrieve the current
  1741  	// revocation for the remote party.
  1742  	ourRevPreImage, err := lc.channelState.RevocationProducer.AtIndex(
  1743  		lc.currentHeight,
  1744  	)
  1745  	if err != nil {
  1746  		return err
  1747  	}
  1748  	localCommitPoint := input.ComputeCommitmentPoint(ourRevPreImage[:])
  1749  	remoteCommitPoint := lc.channelState.RemoteCurrentRevocation
  1750  
  1751  	// With the revocation state reconstructed, we can now convert the disk
  1752  	// commitment into our in-memory commitment format, inserting it into
  1753  	// the local commitment chain.
  1754  	localCommit, err := lc.diskCommitToMemCommit(
  1755  		true, localCommitState, localCommitPoint,
  1756  		remoteCommitPoint,
  1757  	)
  1758  	if err != nil {
  1759  		return err
  1760  	}
  1761  	lc.localCommitChain.addCommitment(localCommit)
  1762  
  1763  	lc.log.Tracef("starting local commitment: %v",
  1764  		newLogClosure(func() string {
  1765  			return spew.Sdump(cacheCommitmentTxHash(lc.localCommitChain.tail()))
  1766  		}),
  1767  	)
  1768  
  1769  	// We'll also do the same for the remote commitment chain.
  1770  	remoteCommit, err := lc.diskCommitToMemCommit(
  1771  		false, remoteCommitState, localCommitPoint,
  1772  		remoteCommitPoint,
  1773  	)
  1774  	if err != nil {
  1775  		return err
  1776  	}
  1777  	lc.remoteCommitChain.addCommitment(remoteCommit)
  1778  
  1779  	lc.log.Tracef("starting remote commitment: %v",
  1780  		newLogClosure(func() string {
  1781  			return spew.Sdump(cacheCommitmentTxHash(lc.remoteCommitChain.tail()))
  1782  		}),
  1783  	)
  1784  
  1785  	var (
  1786  		pendingRemoteCommit     *commitment
  1787  		pendingRemoteCommitDiff *channeldb.CommitDiff
  1788  		pendingRemoteKeyChain   *CommitmentKeyRing
  1789  	)
  1790  
  1791  	// Next, we'll check to see if we have an un-acked commitment state we
  1792  	// extended to the remote party but which was never ACK'd.
  1793  	pendingRemoteCommitDiff, err = lc.channelState.RemoteCommitChainTip()
  1794  	if err != nil && err != channeldb.ErrNoPendingCommit {
  1795  		return err
  1796  	}
  1797  
  1798  	if pendingRemoteCommitDiff != nil {
  1799  		// If we have a pending remote commitment, then we'll also
  1800  		// reconstruct the original commitment for that state,
  1801  		// inserting it into the remote party's commitment chain. We
  1802  		// don't pass our commit point as we don't have the
  1803  		// corresponding state for the local commitment chain.
  1804  		pendingCommitPoint := lc.channelState.RemoteNextRevocation
  1805  		pendingRemoteCommit, err = lc.diskCommitToMemCommit(
  1806  			false, &pendingRemoteCommitDiff.Commitment,
  1807  			nil, pendingCommitPoint,
  1808  		)
  1809  		if err != nil {
  1810  			return err
  1811  		}
  1812  		lc.remoteCommitChain.addCommitment(pendingRemoteCommit)
  1813  
  1814  		lc.log.Debugf("pending remote commitment: %v",
  1815  			newLogClosure(func() string {
  1816  				return spew.Sdump(cacheCommitmentTxHash(lc.remoteCommitChain.tip()))
  1817  			}),
  1818  		)
  1819  
  1820  		// We'll also re-create the set of commitment keys needed to
  1821  		// fully re-derive the state.
  1822  		pendingRemoteKeyChain = DeriveCommitmentKeys(
  1823  			pendingCommitPoint, false, lc.channelState.ChanType,
  1824  			&lc.channelState.LocalChanCfg, &lc.channelState.RemoteChanCfg,
  1825  		)
  1826  	} else {
  1827  		lc.log.Debugf("no pending remote commitment")
  1828  	}
  1829  
  1830  	// Fetch remote updates that we have acked but not yet signed for.
  1831  	unsignedAckedUpdates, err := lc.channelState.UnsignedAckedUpdates()
  1832  	if err != nil {
  1833  		return err
  1834  	}
  1835  
  1836  	// Fetch the local updates the peer still needs to sign for.
  1837  	remoteUnsignedLocalUpdates, err := lc.channelState.RemoteUnsignedLocalUpdates()
  1838  	if err != nil {
  1839  		return err
  1840  	}
  1841  
  1842  	lc.log.Debugf("%d unsignedAckedUpdates, %d remoteUnsignedLocalUpdates",
  1843  		len(unsignedAckedUpdates), len(remoteUnsignedLocalUpdates))
  1844  
  1845  	// Finally, with the commitment states restored, we'll now restore the
  1846  	// state logs based on the current local+remote commit, and any pending
  1847  	// remote commit that exists.
  1848  	err = lc.restoreStateLogs(
  1849  		localCommit, remoteCommit, pendingRemoteCommit,
  1850  		pendingRemoteCommitDiff, pendingRemoteKeyChain,
  1851  		unsignedAckedUpdates, remoteUnsignedLocalUpdates,
  1852  	)
  1853  	if err != nil {
  1854  		return err
  1855  	}
  1856  
  1857  	return nil
  1858  }
  1859  
  1860  // restoreStateLogs runs through the current locked-in HTLCs from the point of
  1861  // view of the channel and inserts corresponding log entries (both local and
  1862  // remote) for each HTLC read from disk. This method is required to sync the
  1863  // in-memory state of the state machine with that read from persistent storage.
  1864  func (lc *LightningChannel) restoreStateLogs(
  1865  	localCommitment, remoteCommitment, pendingRemoteCommit *commitment,
  1866  	pendingRemoteCommitDiff *channeldb.CommitDiff,
  1867  	pendingRemoteKeys *CommitmentKeyRing,
  1868  	unsignedAckedUpdates,
  1869  	remoteUnsignedLocalUpdates []channeldb.LogUpdate) error {
  1870  
  1871  	// We make a map of incoming HTLCs to the height of the remote
  1872  	// commitment at which they were first added, and outgoing HTLCs to
  1873  	// the height of the local commit at which they were first added. This
  1874  	// will be used when we restore the update logs below.
  1875  	incomingRemoteAddHeights := make(map[uint64]uint64)
  1876  	outgoingLocalAddHeights := make(map[uint64]uint64)
  1877  
  1878  	// We start by setting the height of the incoming HTLCs on the pending
  1879  	// remote commitment. We set these heights first since if there are
  1880  	// duplicates, these will be overwritten by the lower height of the
  1881  	// remoteCommitment below.
  1882  	if pendingRemoteCommit != nil {
  1883  		for _, r := range pendingRemoteCommit.incomingHTLCs {
  1884  			incomingRemoteAddHeights[r.HtlcIndex] =
  1885  				pendingRemoteCommit.height
  1886  		}
  1887  	}
  1888  
  1889  	// Now set the remote commit height of all incoming HTLCs found on the
  1890  	// remote commitment.
  1891  	for _, r := range remoteCommitment.incomingHTLCs {
  1892  		incomingRemoteAddHeights[r.HtlcIndex] = remoteCommitment.height
  1893  	}
  1894  
  1895  	// And finally we can do the same for the outgoing HTLCs.
  1896  	for _, l := range localCommitment.outgoingHTLCs {
  1897  		outgoingLocalAddHeights[l.HtlcIndex] = localCommitment.height
  1898  	}
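
        	// As an illustrative example (heights and indexes made up): an
        	// incoming HTLC with HtlcIndex 7 present on both the pending
        	// remote commitment (height 5) and the current remote commitment
        	// (height 4) ends up mapped to height 4, since the loop over
        	// remoteCommitment above overwrites the pending-commit entry with
        	// the lower height.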
  1899  
  1900  	// If we have any unsigned acked updates to sign for, then the add is no
  1901  	// longer on our local commitment, but is still on the remote's commitment.
  1902  	// <---fail---
  1903  	// <---sig----
  1904  	// ----rev--->
  1905  	// To ensure proper channel operation, we restore the add's addCommitHeightLocal
  1906  	// field to the height of our local commitment.
  1907  	for _, logUpdate := range unsignedAckedUpdates {
  1908  
  1909  		var htlcIdx uint64
  1910  		switch wireMsg := logUpdate.UpdateMsg.(type) {
  1911  		case *lnwire.UpdateFulfillHTLC:
  1912  			htlcIdx = wireMsg.ID
  1913  		case *lnwire.UpdateFailHTLC:
  1914  			htlcIdx = wireMsg.ID
  1915  		case *lnwire.UpdateFailMalformedHTLC:
  1916  			htlcIdx = wireMsg.ID
  1917  		default:
  1918  			continue
  1919  		}
  1920  
  1921  		// The htlcIdx is stored in the map with the local commitment
  1922  		// height so the related add's addCommitHeightLocal field can be
  1923  		// restored.
  1924  		outgoingLocalAddHeights[htlcIdx] = localCommitment.height
  1925  	}
  1926  
  1927  	// If there are local updates that the peer needs to sign for, then the
  1928  	// corresponding add is no longer on the remote commitment, but is still on
  1929  	// our local commitment.
  1930  	// ----fail--->
  1931  	// ----sig---->
  1932  	// <---rev-----
  1933  	// To ensure proper channel operation, we restore the add's addCommitHeightRemote
  1934  	// field to the height of the remote commitment.
  1935  	for _, logUpdate := range remoteUnsignedLocalUpdates {
  1936  
  1937  		var htlcIdx uint64
  1938  		switch wireMsg := logUpdate.UpdateMsg.(type) {
  1939  		case *lnwire.UpdateFulfillHTLC:
  1940  			htlcIdx = wireMsg.ID
  1941  		case *lnwire.UpdateFailHTLC:
  1942  			htlcIdx = wireMsg.ID
  1943  		case *lnwire.UpdateFailMalformedHTLC:
  1944  			htlcIdx = wireMsg.ID
  1945  		default:
  1946  			continue
  1947  		}
  1948  
  1949  		// The htlcIdx is stored in the map with the remote commitment
  1950  		// height so the related add's addCommitHeightRemote field can be
  1951  		// restored.
  1952  		incomingRemoteAddHeights[htlcIdx] = remoteCommitment.height
  1953  	}
  1954  
  1955  	// For each incoming HTLC within the local commitment, we add it to the
  1956  	// remote update log. Since HTLCs are added first to the receiver's
  1957  	// commitment, we don't have to restore outgoing HTLCs, as they will be
  1958  	// restored from the remote commitment below.
  1959  	for i := range localCommitment.incomingHTLCs {
  1960  		htlc := localCommitment.incomingHTLCs[i]
  1961  
  1962  		// We'll need to set the add height of the HTLC. Since it is on
  1963  		// this local commit, we can use its height as local add
  1964  		// height. As remote add height we consult the incoming HTLC
  1965  		// map we created earlier. Note that if this HTLC is not in
  1966  		// incomingRemoteAddHeights, the remote add height will be set
  1967  		// to zero, which indicates that it is not added yet.
  1968  		htlc.addCommitHeightLocal = localCommitment.height
  1969  		htlc.addCommitHeightRemote = incomingRemoteAddHeights[htlc.HtlcIndex]
  1970  
  1971  		// Restore the htlc back to the remote log.
  1972  		lc.remoteUpdateLog.restoreHtlc(&htlc)
  1973  	}
  1974  
  1975  	// Similarly, we'll do the same for the outgoing HTLCs within the
  1976  	// remote commitment, adding them to the local update log.
  1977  	for i := range remoteCommitment.outgoingHTLCs {
  1978  		htlc := remoteCommitment.outgoingHTLCs[i]
  1979  
  1980  		// As for the incoming HTLCs, we'll use the current remote
  1981  		// commit height as remote add height, and consult the map
  1982  		// created above for the local add height.
  1983  		htlc.addCommitHeightRemote = remoteCommitment.height
  1984  		htlc.addCommitHeightLocal = outgoingLocalAddHeights[htlc.HtlcIndex]
  1985  
  1986  		// Restore the htlc back to the local log.
  1987  		lc.localUpdateLog.restoreHtlc(&htlc)
  1988  	}
  1989  
  1990  	// If we have a dangling (un-acked) commit for the remote party, then we
  1991  	// restore the updates leading up to this commit.
  1992  	if pendingRemoteCommit != nil {
  1993  		err := lc.restorePendingLocalUpdates(
  1994  			pendingRemoteCommitDiff, pendingRemoteKeys,
  1995  		)
  1996  		if err != nil {
  1997  			return err
  1998  		}
  1999  	}
  2000  
  2001  	// Restore unsigned acked remote log updates so that we can include them
  2002  	// in our next signature.
  2003  	err := lc.restorePendingRemoteUpdates(
  2004  		unsignedAckedUpdates, localCommitment.height,
  2005  		pendingRemoteCommit,
  2006  	)
  2007  	if err != nil {
  2008  		return err
  2009  	}
  2010  
  2011  	// Restore unsigned acked local log updates so we expect the peer to
  2012  	// sign for them.
  2013  	return lc.restorePeerLocalUpdates(
  2014  		remoteUnsignedLocalUpdates, remoteCommitment.height,
  2015  	)
  2016  }
  2017  
  2018  // restorePendingRemoteUpdates restores the acked remote log updates that we
  2019  // haven't yet signed for.
  2020  func (lc *LightningChannel) restorePendingRemoteUpdates(
  2021  	unsignedAckedUpdates []channeldb.LogUpdate,
  2022  	localCommitmentHeight uint64,
  2023  	pendingRemoteCommit *commitment) error {
  2024  
  2025  	lc.log.Debugf("Restoring %v dangling remote updates",
  2026  		len(unsignedAckedUpdates))
  2027  
  2028  	for _, logUpdate := range unsignedAckedUpdates {
  2029  		logUpdate := logUpdate
  2030  
  2031  		payDesc, err := lc.remoteLogUpdateToPayDesc(
  2032  			&logUpdate, lc.localUpdateLog, localCommitmentHeight,
  2033  		)
  2034  		if err != nil {
  2035  			return err
  2036  		}
  2037  
  2038  		logIdx := payDesc.LogIndex
  2039  
  2040  		// Sanity check that we are not restoring a remote log update
  2041  		// that we haven't received a sig for.
  2042  		if logIdx >= lc.remoteUpdateLog.logIndex {
  2043  			return fmt.Errorf("attempted to restore an "+
  2044  				"unsigned remote update: log_index=%v",
  2045  				logIdx)
  2046  		}
  2047  
  2048  		// We previously restored Adds along with all the other updates,
  2049  		// but this Add restoration was a no-op as every single one of
  2050  		// these Adds was already restored since they're all incoming
  2051  		// htlcs on the local commitment.
  2052  		if payDesc.EntryType == Add {
  2053  			continue
  2054  		}
  2055  
  2056  		var (
  2057  			height    uint64
  2058  			heightSet bool
  2059  		)
  2060  
  2061  		// If we have a pending commitment for them, and this update
  2062  		// is included in that commit, then we'll use that commitment's
  2063  		// height, as it will include these updates in their new remote
  2064  		// commitment.
  2065  		if pendingRemoteCommit != nil {
  2066  			if logIdx < pendingRemoteCommit.theirMessageIndex {
  2067  				height = pendingRemoteCommit.height
  2068  				heightSet = true
  2069  			}
  2070  		}
  2071  
  2072  		// Insert the update into the log. The log update index doesn't
  2073  		// need to be incremented (hence the restore calls), because its
  2074  		// final value was properly persisted with the last local
  2075  		// commitment update.
  2076  		switch payDesc.EntryType {
  2077  		case FeeUpdate:
  2078  			if heightSet {
  2079  				payDesc.addCommitHeightRemote = height
  2080  				payDesc.removeCommitHeightRemote = height
  2081  			}
  2082  
  2083  			lc.remoteUpdateLog.restoreUpdate(payDesc)
  2084  
  2085  		default:
  2086  			if heightSet {
  2087  				payDesc.removeCommitHeightRemote = height
  2088  			}
  2089  
  2090  			lc.remoteUpdateLog.restoreUpdate(payDesc)
  2091  			lc.localUpdateLog.markHtlcModified(payDesc.ParentIndex)
  2092  		}
  2093  	}
  2094  
  2095  	return nil
  2096  }
  2097  
  2098  // restorePeerLocalUpdates restores the acked local log updates the peer still
  2099  // needs to sign for.
  2100  func (lc *LightningChannel) restorePeerLocalUpdates(updates []channeldb.LogUpdate,
  2101  	remoteCommitmentHeight uint64) error {
  2102  
  2103  	lc.log.Debugf("Restoring %v local updates that the peer should sign",
  2104  		len(updates))
  2105  
  2106  	for _, logUpdate := range updates {
  2107  		logUpdate := logUpdate
  2108  
  2109  		payDesc, err := lc.localLogUpdateToPayDesc(
  2110  			&logUpdate, lc.remoteUpdateLog, remoteCommitmentHeight,
  2111  		)
  2112  		if err != nil {
  2113  			return err
  2114  		}
  2115  
  2116  		lc.localUpdateLog.restoreUpdate(payDesc)
  2117  
  2118  		// Since Add updates are not stored and FeeUpdates don't have a
  2119  		// corresponding entry in the remote update log, we only need to
  2120  		// mark the htlc as modified if the update was Settle, Fail, or
  2121  		// MalformedFail.
  2122  		if payDesc.EntryType != FeeUpdate {
  2123  			lc.remoteUpdateLog.markHtlcModified(payDesc.ParentIndex)
  2124  		}
  2125  	}
  2126  
  2127  	return nil
  2128  }
  2129  
  2130  // restorePendingLocalUpdates restores the local log updates leading up to the
  2131  // given pending remote commitment.
  2132  func (lc *LightningChannel) restorePendingLocalUpdates(
  2133  	pendingRemoteCommitDiff *channeldb.CommitDiff,
  2134  	pendingRemoteKeys *CommitmentKeyRing) error {
  2135  
  2136  	pendingCommit := pendingRemoteCommitDiff.Commitment
  2137  	pendingHeight := pendingCommit.CommitHeight
  2138  
  2139  	// If we did have a dangling commit, then we'll examine which updates
  2140  	// we included in that state and re-insert them into our update log.
  2141  	for _, logUpdate := range pendingRemoteCommitDiff.LogUpdates {
  2142  		logUpdate := logUpdate
  2143  
  2144  		payDesc, err := lc.logUpdateToPayDesc(
  2145  			&logUpdate, lc.remoteUpdateLog, pendingHeight,
  2146  			chainfee.AtomPerKByte(pendingCommit.FeePerKB),
  2147  			pendingRemoteKeys,
  2148  			lc.channelState.RemoteChanCfg.DustLimit,
  2149  		)
  2150  		if err != nil {
  2151  			return err
  2152  		}
  2153  
  2154  		// Earlier versions did not write the log index to disk for fee
  2155  		// updates, so they will be unset. To account for this we set
  2156  		// them to the current update log index.
  2157  		if payDesc.EntryType == FeeUpdate && payDesc.LogIndex == 0 &&
  2158  			lc.localUpdateLog.logIndex > 0 {
  2159  
  2160  			payDesc.LogIndex = lc.localUpdateLog.logIndex
  2161  			lc.log.Debugf("Found FeeUpdate on "+
  2162  				"pendingRemoteCommitDiff without logIndex, "+
  2163  				"using %v", payDesc.LogIndex)
  2164  		}
  2165  
  2166  		// At this point the restored update's logIndex must be equal
  2167  		// to the update log's index, otherwise something is horribly wrong.
  2168  		if payDesc.LogIndex != lc.localUpdateLog.logIndex {
  2169  			panic(fmt.Sprintf("log index mismatch: "+
  2170  				"%v vs %v", payDesc.LogIndex,
  2171  				lc.localUpdateLog.logIndex))
  2172  		}
  2173  
  2174  		switch payDesc.EntryType {
  2175  		case Add:
  2176  			// The HtlcIndex of the added HTLC _must_ be equal to
  2177  			// the log's htlcCounter at this point. If it is not we
  2178  			// panic to catch this.
  2179  			// TODO(halseth): remove when cause of htlc entry bug
  2180  			// is found.
  2181  			if payDesc.HtlcIndex != lc.localUpdateLog.htlcCounter {
  2182  				panic(fmt.Sprintf("htlc index mismatch: "+
  2183  					"%v vs %v", payDesc.HtlcIndex,
  2184  					lc.localUpdateLog.htlcCounter))
  2185  			}
  2186  
  2187  			lc.localUpdateLog.appendHtlc(payDesc)
  2188  
  2189  		case FeeUpdate:
  2190  			lc.localUpdateLog.appendUpdate(payDesc)
  2191  
  2192  		default:
  2193  			lc.localUpdateLog.appendUpdate(payDesc)
  2194  
  2195  			lc.remoteUpdateLog.markHtlcModified(payDesc.ParentIndex)
  2196  		}
  2197  	}
  2198  
  2199  	return nil
  2200  }
  2201  
  2202  // HtlcRetribution contains all the items necessary to sweep a revoked HTLC
  2203  // transaction from a revoked commitment transaction broadcast by the remote
  2204  // party.
  2205  type HtlcRetribution struct {
  2206  	// SignDesc is a sign descriptor capable of generating the necessary
  2207  	// signatures to satisfy the revocation clause of the HTLC's public key
  2208  	// script.
  2209  	SignDesc input.SignDescriptor
  2210  
  2211  	// OutPoint is the target outpoint of this HTLC pointing to the
  2212  	// breached commitment transaction.
  2213  	OutPoint wire.OutPoint
  2214  
  2215  	// SecondLevelWitnessScript is the witness script that will be created
  2216  	// if the second level HTLC transaction for this output is
  2217  	// broadcast/confirmed. We provide this because, if the remote party
  2218  	// attempts to go to the second level to claim the HTLC, we'll need to
  2219  	// update the SignDesc above accordingly to sweep properly.
  2220  	SecondLevelWitnessScript []byte
  2221  
  2222  	// IsIncoming is a boolean flag that indicates whether or not this
  2223  	// HTLC was accepted from the counterparty. A false value indicates that
  2224  	// this HTLC was offered by us. This flag is used to determine the
  2225  	// exact witness type that should be used to sweep the output.
  2226  	IsIncoming bool
  2227  }
  2228  
  2229  // BreachRetribution contains all the data necessary to bring a channel
  2230  // counterparty to justice, claiming ALL lingering funds within the channel in
  2231  // the scenario that they broadcast a revoked commitment transaction. A
  2232  // BreachRetribution is created by the closeObserver if it detects an
  2233  // uncooperative close of the channel which uses a revoked commitment
  2234  // transaction. The BreachRetribution is then sent over the ContractBreach
  2235  // channel in order to allow the subscriber of the channel to dispatch justice.
  2236  type BreachRetribution struct {
  2237  	// BreachTransaction is the transaction which breached the channel
  2238  	// contract by spending from the funding multi-sig with a revoked
  2239  	// commitment transaction.
  2240  	BreachTransaction *wire.MsgTx
  2241  
  2242  	// BreachHeight records the block height confirming the breach
  2243  	// transaction, used as a height hint when registering for
  2244  	// confirmations.
  2245  	BreachHeight uint32
  2246  
  2247  	// ChainHash is the chain that the contract breach was identified
  2248  	// within. This is also the resident chain of the contract (the chain
  2249  	// the contract was created on).
  2250  	ChainHash chainhash.Hash
  2251  
  2252  	// RevokedStateNum is the revoked state number which was broadcast.
  2253  	RevokedStateNum uint64
  2254  
  2255  	// PendingHTLCs is a slice of the HTLCs which were pending at this
  2256  	// point within the channel's history transcript.
  2257  	PendingHTLCs []channeldb.HTLC
  2258  
  2259  	// LocalOutputSignDesc is a input.SignDescriptor which is capable of
  2260  	// generating the signature necessary to sweep the output within the
  2261  	// BreachTransaction that pays directly to us.
  2262  	//
  2263  	// NOTE: A nil value indicates that the local output is considered dust
  2264  	// according to the remote party's dust limit.
  2265  	LocalOutputSignDesc *input.SignDescriptor
  2266  
  2267  	// LocalOutpoint is the outpoint of the output paying to us (the local
  2268  	// party) within the breach transaction.
  2269  	LocalOutpoint wire.OutPoint
  2270  
  2271  	// LocalDelay is the CSV delay for the to_remote script on the breached
  2272  	// commitment.
  2273  	LocalDelay uint32
  2274  
  2275  	// RemoteOutputSignDesc is a SignDescriptor which is capable of
  2276  	// generating the signature required to claim the funds as described
  2277  	// within the revocation clause of the remote party's commitment
  2278  	// output.
  2279  	//
  2280  	// NOTE: A nil value indicates that the remote output is considered dust
  2281  	// according to the remote party's dust limit.
  2282  	RemoteOutputSignDesc *input.SignDescriptor
  2283  
  2284  	// RemoteOutpoint is the outpoint of the output paying to the remote
  2285  	// party within the breach transaction.
  2286  	RemoteOutpoint wire.OutPoint
  2287  
  2288  	// RemoteDelay specifies the CSV delay applied to to-local scripts on
  2289  	// the breaching commitment transaction.
  2290  	RemoteDelay uint32
  2291  
  2292  	// HtlcRetributions is a slice of HTLC retributions, one for each
  2293  	// active HTLC output within the breached commitment transaction.
  2294  	HtlcRetributions []HtlcRetribution
  2295  
  2296  	// KeyRing contains the derived public keys used to construct the
  2297  	// breaching commitment transaction. This allows downstream clients to
  2298  	// have access to the public keys used in the scripts.
  2299  	KeyRing *CommitmentKeyRing
  2300  }
  2301  
  2302  // NewBreachRetribution creates a new fully populated BreachRetribution for the
  2303  // passed channel, at a particular revoked state number, and one which targets
  2304  // the passed commitment transaction.
  2305  func NewBreachRetribution(chanState *channeldb.OpenChannel, stateNum uint64,
  2306  	breachHeight uint32) (*BreachRetribution, error) {
  2307  
  2308  	// Query the on-disk revocation log for the snapshot which was recorded
  2309  	// at this particular state num.
  2310  	revokedSnapshot, err := chanState.FindPreviousState(stateNum)
  2311  	if err != nil {
  2312  		return nil, err
  2313  	}
  2314  
  2315  	commitHash := revokedSnapshot.CommitTx.TxHash()
  2316  
  2317  	// With the state number broadcast known, we can now derive/restore the
  2318  	// proper revocation preimage necessary to sweep the remote party's
  2319  	// output.
  2320  	revocationPreimage, err := chanState.RevocationStore.LookUp(stateNum)
  2321  	if err != nil {
  2322  		return nil, err
  2323  	}
  2324  	commitmentSecret := secp256k1.PrivKeyFromBytes(
  2325  		revocationPreimage[:])
  2326  	commitmentPoint := commitmentSecret.PubKey()
  2327  
  2328  	// With the commitment point generated, we can now generate the four
  2329  	// keys we'll need to reconstruct the commitment state.
  2330  	keyRing := DeriveCommitmentKeys(
  2331  		commitmentPoint, false, chanState.ChanType,
  2332  		&chanState.LocalChanCfg, &chanState.RemoteChanCfg,
  2333  	)
  2334  
  2335  	// Next, reconstruct the scripts as they were present at this state
  2336  	// number so we can have the proper witness script to sign and include
  2337  	// within the final witness.
  2338  	theirDelay := uint32(chanState.RemoteChanCfg.CsvDelay)
  2339  	isRemoteInitiator := !chanState.IsInitiator
  2340  	var leaseExpiry uint32
  2341  	if chanState.ChanType.HasLeaseExpiration() {
  2342  		leaseExpiry = chanState.ThawHeight
  2343  	}
  2344  	theirScript, err := CommitScriptToSelf(
  2345  		chanState.ChanType, isRemoteInitiator, keyRing.ToLocalKey,
  2346  		keyRing.RevocationKey, theirDelay, leaseExpiry,
  2347  	)
  2348  	if err != nil {
  2349  		return nil, err
  2350  	}
  2351  
  2352  	// Since it is the remote party's breach we are reconstructing, the
  2353  	// output going to us will be a to-remote script with our local params.
  2354  	ourScript, ourDelay, err := CommitScriptToRemote(
  2355  		chanState.ChanType, isRemoteInitiator, keyRing.ToRemoteKey,
  2356  		leaseExpiry,
  2357  	)
  2358  	if err != nil {
  2359  		return nil, err
  2360  	}
  2361  
  2362  	// In order to fully populate the breach retribution struct, we'll need
  2363  	// to find the exact index of the commitment outputs.
  2364  	ourOutpoint := wire.OutPoint{
  2365  		Hash: commitHash,
  2366  		Tree: wire.TxTreeRegular,
  2367  	}
  2368  	theirOutpoint := wire.OutPoint{
  2369  		Hash: commitHash,
  2370  		Tree: wire.TxTreeRegular,
  2371  	}
  2372  	for i, txOut := range revokedSnapshot.CommitTx.TxOut {
  2373  		switch {
  2374  		case bytes.Equal(txOut.PkScript, ourScript.PkScript):
  2375  			ourOutpoint.Index = uint32(i)
  2376  		case bytes.Equal(txOut.PkScript, theirScript.PkScript):
  2377  			theirOutpoint.Index = uint32(i)
  2378  		}
  2379  	}
  2380  
  2381  	// Conditionally instantiate a sign descriptor for each of the
  2382  	// commitment outputs. If either is considered dust using the remote
  2383  	// party's dust limit, the respective sign descriptor will be nil.
  2384  	var (
  2385  		ourSignDesc   *input.SignDescriptor
  2386  		theirSignDesc *input.SignDescriptor
  2387  	)
  2388  
  2389  	// Compute the balances in atoms.
  2390  	ourAmt := revokedSnapshot.LocalBalance.ToAtoms()
  2391  	theirAmt := revokedSnapshot.RemoteBalance.ToAtoms()
  2392  
  2393  	// If our balance meets or exceeds the remote party's dust limit,
  2394  	// instantiate the sign descriptor for our output.
  2395  	if ourAmt >= chanState.RemoteChanCfg.DustLimit {
  2396  		ourSignDesc = &input.SignDescriptor{
  2397  			SingleTweak:   keyRing.LocalCommitKeyTweak,
  2398  			KeyDesc:       chanState.LocalChanCfg.PaymentBasePoint,
  2399  			WitnessScript: ourScript.WitnessScript,
  2400  			Output: &wire.TxOut{
  2401  				Version:  scriptVersion,
  2402  				PkScript: ourScript.PkScript,
  2403  				Value:    int64(ourAmt),
  2404  			},
  2405  			HashType: txscript.SigHashAll,
  2406  		}
  2407  	}
  2408  
  2409  	// Similarly, if their balance meets or exceeds the remote party's dust
  2410  	// limit, assemble the sign descriptor for their output, which we can sweep.
  2411  	if theirAmt >= chanState.RemoteChanCfg.DustLimit {
  2412  		theirSignDesc = &input.SignDescriptor{
  2413  			KeyDesc:       chanState.LocalChanCfg.RevocationBasePoint,
  2414  			DoubleTweak:   commitmentSecret,
  2415  			WitnessScript: theirScript.WitnessScript,
  2416  			Output: &wire.TxOut{
  2417  				Version:  scriptVersion,
  2418  				PkScript: theirScript.PkScript,
  2419  				Value:    int64(theirAmt),
  2420  			},
  2421  			HashType: txscript.SigHashAll,
  2422  		}
  2423  	}
  2424  
  2425  	// With the commitment outputs located, we'll now generate all the
  2426  	// retribution structs for each of the HTLC transactions active on the
  2427  	// remote commitment transaction.
  2428  	htlcRetributions := make([]HtlcRetribution, 0, len(revokedSnapshot.Htlcs))
  2429  	for _, htlc := range revokedSnapshot.Htlcs {
  2430  		// If the HTLC is dust, then we'll skip it as it doesn't have
  2431  		// an output on the commitment transaction.
  2432  		if HtlcIsDust(
  2433  			chanState.ChanType, htlc.Incoming, false,
  2434  			chainfee.AtomPerKByte(revokedSnapshot.FeePerKB),
  2435  			htlc.Amt.ToAtoms(), chanState.RemoteChanCfg.DustLimit,
  2436  		) {
  2437  			continue
  2438  		}
  2439  
  2440  		// We'll generate the original second level witness script now,
  2441  		// as we'll need it if we're revoking an HTLC output on the
  2442  		// remote commitment transaction, and *they* go to the second
  2443  		// level.
  2444  		secondLevelScript, err := SecondLevelHtlcScript(
  2445  			chanState.ChanType, isRemoteInitiator,
  2446  			keyRing.RevocationKey, keyRing.ToLocalKey, theirDelay,
  2447  			leaseExpiry,
  2448  		)
  2449  		if err != nil {
  2450  			return nil, err
  2451  		}
  2452  
  2453  		// If this is an incoming HTLC, then this means that they were
  2454  		// the sender of the HTLC (relative to us). So we'll
  2455  		// re-generate the sender HTLC script. Otherwise, if this was
  2456  		// an outgoing HTLC that we sent, then from the PoV of the
  2457  		// remote commitment state, they're the receiver of this HTLC.
  2458  		htlcPkScript, htlcWitnessScript, err := genHtlcScript(
  2459  			chanState.ChanType, htlc.Incoming, false,
  2460  			htlc.RefundTimeout, htlc.RHash, keyRing,
  2461  		)
  2462  		if err != nil {
  2463  			return nil, err
  2464  		}
  2465  
  2466  		htlcRetributions = append(htlcRetributions, HtlcRetribution{
  2467  			SignDesc: input.SignDescriptor{
  2468  				KeyDesc:       chanState.LocalChanCfg.RevocationBasePoint,
  2469  				DoubleTweak:   commitmentSecret,
  2470  				WitnessScript: htlcWitnessScript,
  2471  				Output: &wire.TxOut{
  2472  					Version:  scriptVersion,
  2473  					PkScript: htlcPkScript,
  2474  					Value:    int64(htlc.Amt.ToAtoms()),
  2475  				},
  2476  				HashType: txscript.SigHashAll,
  2477  			},
  2478  			OutPoint: wire.OutPoint{
  2479  				Hash:  commitHash,
  2480  				Index: uint32(htlc.OutputIndex),
  2481  				Tree:  wire.TxTreeRegular,
  2482  			},
  2483  			SecondLevelWitnessScript: secondLevelScript.WitnessScript,
  2484  			IsIncoming:               htlc.Incoming,
  2485  		})
  2486  	}
  2487  
  2488  	// Finally, with all the necessary data constructed, we can create the
  2489  	// BreachRetribution struct which houses all the data necessary to
  2490  	// swiftly bring justice to the cheating remote party.
  2491  	return &BreachRetribution{
  2492  		ChainHash:            chanState.ChainHash,
  2493  		BreachTransaction:    revokedSnapshot.CommitTx,
  2494  		BreachHeight:         breachHeight,
  2495  		RevokedStateNum:      stateNum,
  2496  		PendingHTLCs:         revokedSnapshot.Htlcs,
  2497  		LocalOutpoint:        ourOutpoint,
  2498  		LocalOutputSignDesc:  ourSignDesc,
  2499  		LocalDelay:           ourDelay,
  2500  		RemoteOutpoint:       theirOutpoint,
  2501  		RemoteOutputSignDesc: theirSignDesc,
  2502  		RemoteDelay:          theirDelay,
  2503  		HtlcRetributions:     htlcRetributions,
  2504  		KeyRing:              keyRing,
  2505  	}, nil
  2506  }
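
        // A hedged usage sketch (error handling elided; chanState, the
        // broadcast state number, and the confirmation height are assumed to
        // come from the caller's breach-detection logic):
        //
        //	retribution, err := NewBreachRetribution(chanState, stateNum, height)
        //	if err != nil {
        //		// handle error
        //	}
        //	// retribution.HtlcRetributions and the two output sign
        //	// descriptors feed construction of the justice transaction.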
  2507  
  2508  // HtlcIsDust determines if an HTLC output is dust or not depending on two
  2509  // bits: if the HTLC is incoming and if the HTLC will be placed on our
  2510  // commitment transaction, or theirs. These two pieces of information are
  2511  // required as we currently use second-level HTLC transactions as off-chain
  2512  // covenants. Depending on the two bits, we'll either be using a timeout or
  2513  // success transaction, which have different sizes.
  2514  func HtlcIsDust(chanType channeldb.ChannelType,
  2515  	incoming, ourCommit bool, feePerKB chainfee.AtomPerKByte,
  2516  	htlcAmt, dustLimit dcrutil.Amount) bool {
  2517  
  2518  	// First we'll determine the fee required for this HTLC based on
  2519  	// whether this is an incoming HTLC or not, and also on whose
  2520  	// commitment transaction it will be placed on.
  2521  	var htlcFee dcrutil.Amount
  2522  	switch {
  2523  
  2524  	// If this is an incoming HTLC on our commitment transaction, then the
  2525  	// second-level transaction will be a success transaction.
  2526  	case incoming && ourCommit:
  2527  		htlcFee = HtlcSuccessFee(chanType, feePerKB)
  2528  
  2529  	// If this is an incoming HTLC on their commitment transaction, then
  2530  	// we'll be using a second-level timeout transaction as they've added
  2531  	// this HTLC.
  2532  	case incoming && !ourCommit:
  2533  		htlcFee = HtlcTimeoutFee(chanType, feePerKB)
  2534  
  2535  	// If this is an outgoing HTLC on our commitment transaction, then
  2536  	// we'll be using a timeout transaction as we're the sender of the
  2537  	// HTLC.
  2538  	case !incoming && ourCommit:
  2539  		htlcFee = HtlcTimeoutFee(chanType, feePerKB)
  2540  
  2541  	// If this is an outgoing HTLC on their commitment transaction, then
  2542  	// we'll be using an HTLC success transaction as they're the receiver
  2543  	// of this HTLC.
  2544  	case !incoming && !ourCommit:
  2545  		htlcFee = HtlcSuccessFee(chanType, feePerKB)
  2546  	}
  2547  
  2548  	return (htlcAmt - htlcFee) < dustLimit
  2549  }
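
        // exampleHtlcIsDust is a hedged, illustrative sketch of the dust rule
        // above; every value here is made up and does not correspond to any
        // real channel. An incoming HTLC on our own commitment is evaluated
        // against the second-level success-transaction fee.
        func exampleHtlcIsDust() bool {
        	// Zero value used purely for illustration.
        	var chanType channeldb.ChannelType
        	feeRate := chainfee.AtomPerKByte(10000)
        	htlcAmt := dcrutil.Amount(7000)
        	dustLimit := dcrutil.Amount(6030)

        	// Dust iff htlcAmt - HtlcSuccessFee(chanType, feeRate) < dustLimit.
        	return HtlcIsDust(chanType, true, true, feeRate, htlcAmt, dustLimit)
        }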
  2550  
  2551  // htlcView represents the "active" HTLCs at a particular point within the
  2552  // history of the HTLC update log.
  2553  type htlcView struct {
  2554  	ourUpdates   []*PaymentDescriptor
  2555  	theirUpdates []*PaymentDescriptor
  2556  	feePerKB     chainfee.AtomPerKByte
  2557  }
  2558  
  2559  // fetchHTLCView returns all the candidate HTLC updates which should be
  2560  // considered for inclusion within a commitment based on the passed HTLC log
  2561  // indexes.
  2562  func (lc *LightningChannel) fetchHTLCView(theirLogIndex, ourLogIndex uint64) *htlcView {
  2563  	var ourHTLCs []*PaymentDescriptor
  2564  	for e := lc.localUpdateLog.Front(); e != nil; e = e.Next() {
  2565  		htlc := e.Value.(*PaymentDescriptor)
  2566  
  2567  		// This HTLC is active from this point-of-view iff the log
  2568  		// index of the state update is below the specified index in
  2569  		// our update log.
  2570  		if htlc.LogIndex < ourLogIndex {
  2571  			ourHTLCs = append(ourHTLCs, htlc)
  2572  		}
  2573  	}
  2574  
  2575  	var theirHTLCs []*PaymentDescriptor
  2576  	for e := lc.remoteUpdateLog.Front(); e != nil; e = e.Next() {
  2577  		htlc := e.Value.(*PaymentDescriptor)
  2578  
  2579  		// If this is an incoming HTLC, then it is only active from
  2580  		// this point-of-view if the index of the HTLC addition in
  2581  		// their log is below the specified view index.
  2582  		if htlc.LogIndex < theirLogIndex {
  2583  			theirHTLCs = append(theirHTLCs, htlc)
  2584  		}
  2585  	}
  2586  
  2587  	return &htlcView{
  2588  		ourUpdates:   ourHTLCs,
  2589  		theirUpdates: theirHTLCs,
  2590  	}
  2591  }
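
        // For instance (indexes made up), calling fetchHTLCView with
        // ourLogIndex=3 yields:
        //
        //	LogIndex 0, 1, 2 -> included in ourUpdates
        //	LogIndex 3       -> excluded (not strictly below ourLogIndex)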
  2592  
  2593  // fetchCommitmentView returns a populated commitment which expresses the state
  2594  // of the channel from the point of view of a local or remote chain, evaluating
  2595  // the HTLC log up to the passed indexes. This function is used to construct
  2596  // both local and remote commitment transactions in order to sign or verify new
  2597  // commitment updates. A fully populated commitment is returned which reflects
  2598  // the proper balances for both sides at this point in the commitment chain.
  2599  func (lc *LightningChannel) fetchCommitmentView(remoteChain bool,
  2600  	ourLogIndex, ourHtlcIndex, theirLogIndex, theirHtlcIndex uint64,
  2601  	keyRing *CommitmentKeyRing) (*commitment, error) {
  2602  
  2603  	commitChain := lc.localCommitChain
  2604  	dustLimit := lc.channelState.LocalChanCfg.DustLimit
  2605  	if remoteChain {
  2606  		commitChain = lc.remoteCommitChain
  2607  		dustLimit = lc.channelState.RemoteChanCfg.DustLimit
  2608  	}
  2609  
  2610  	nextHeight := commitChain.tip().height + 1
  2611  
  2612  	// Run through all the HTLCs that will be covered by this transaction
  2613  	// in order to update their commitment addition height, and to adjust
  2614  	// the balances on the commitment transaction accordingly. Note that
  2615  	// these balances will be *before* taking a commitment fee from the
  2616  	// initiator.
  2617  	htlcView := lc.fetchHTLCView(theirLogIndex, ourLogIndex)
  2618  	ourBalance, theirBalance, _, filteredHTLCView, err := lc.computeView(
  2619  		htlcView, remoteChain, true,
  2620  	)
  2621  	if err != nil {
  2622  		return nil, err
  2623  	}
  2624  	feePerKB := filteredHTLCView.feePerKB
  2625  
  2626  	// Actually generate the unsigned commitment transaction for this view.
  2627  	commitTx, err := lc.commitBuilder.createUnsignedCommitmentTx(
  2628  		ourBalance, theirBalance, !remoteChain, feePerKB, nextHeight,
  2629  		filteredHTLCView, keyRing,
  2630  	)
  2631  	if err != nil {
  2632  		return nil, err
  2633  	}
  2634  
  2635  	// We'll assert that there hasn't been a mistake during fee calculation
  2636  	// leading to a fee too low.
  2637  	var totalOut dcrutil.Amount
  2638  	for _, txOut := range commitTx.txn.TxOut {
  2639  		totalOut += dcrutil.Amount(txOut.Value)
  2640  	}
  2641  	fee := lc.channelState.Capacity - totalOut
  2642  
  2643  	// Since the transaction is not signed yet, we manually add in the size
  2644  	// of the sigscript required to redeem the funding output.
  2645  	// msgTx.SerializeSize() includes one byte for sigscript len so there's
  2646  	// no need to add it here.
  2647  	size := int64(commitTx.txn.SerializeSize()) + input.FundingOutputSigScriptSize
  2648  
  2649  	effFeeRate := chainfee.AtomPerKByte(fee) * 1000 /
  2650  		chainfee.AtomPerKByte(size)
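        	// For example, a 1200 byte commitment paying a 12000 atom fee
        	// has an effective rate of 12000 * 1000 / 1200 = 10000
        	// atoms/kB, which is then checked against the relay fee floor
        	// (figures are illustrative only).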
  2651  	if effFeeRate < chainfee.FeePerKBFloor && !disableFeeFloorCheck {
  2652  		return nil, fmt.Errorf("height=%v, for ChannelPoint(%v) "+
  2653  			"attempts to create commitment with too low feerate %v: %v",
  2654  			nextHeight, lc.channelState.FundingOutpoint,
  2655  			effFeeRate, spew.Sdump(commitTx))
  2656  	}
  2657  
  2658  	// With the commitment view created, store the resulting balances and
  2659  	// transaction with the other parameters for this height.
  2660  	c := &commitment{
  2661  		ourBalance:        commitTx.ourBalance,
  2662  		theirBalance:      commitTx.theirBalance,
  2663  		txn:               commitTx.txn,
  2664  		fee:               commitTx.fee,
  2665  		ourMessageIndex:   ourLogIndex,
  2666  		ourHtlcIndex:      ourHtlcIndex,
  2667  		theirMessageIndex: theirLogIndex,
  2668  		theirHtlcIndex:    theirHtlcIndex,
  2669  		height:            nextHeight,
  2670  		feePerKB:          feePerKB,
  2671  		dustLimit:         dustLimit,
  2672  		isOurs:            !remoteChain,
  2673  	}
  2674  
  2675  	// In order to ensure _none_ of the HTLC's associated with this new
  2676  	// commitment are mutated, we'll manually copy over each HTLC to its
  2677  	// respective slice.
  2678  	c.outgoingHTLCs = make([]PaymentDescriptor, len(filteredHTLCView.ourUpdates))
  2679  	for i, htlc := range filteredHTLCView.ourUpdates {
  2680  		c.outgoingHTLCs[i] = *htlc
  2681  	}
  2682  	c.incomingHTLCs = make([]PaymentDescriptor, len(filteredHTLCView.theirUpdates))
  2683  	for i, htlc := range filteredHTLCView.theirUpdates {
  2684  		c.incomingHTLCs[i] = *htlc
  2685  	}
  2686  
  2687  	// Finally, we'll populate all the HTLC indexes so we can track the
  2688  	// locations of each HTLC in the commitment state. We pass in the sorted
  2689  	// slice of CLTV deltas in order to properly locate HTLCs that otherwise
  2690  	// have the same payment hash and amount.
  2691  	err = c.populateHtlcIndexes(lc.channelState.ChanType, commitTx.cltvs)
  2692  	if err != nil {
  2693  		return nil, err
  2694  	}
  2695  
  2696  	return c, nil
  2697  }
  2698  
  2699  func fundingTxIn(chanState *channeldb.OpenChannel) wire.TxIn {
  2700  	return *wire.NewTxIn(&chanState.FundingOutpoint, 0, nil) // TODO(decred): Need correct input value
  2701  }
  2702  
  2703  // evaluateHTLCView processes all update entries in both HTLC update logs,
  2704  // producing a final view which is the result of properly applying all adds,
  2705  // settles, timeouts and fee updates found in both logs. The resulting view
  2706  // returned reflects the current state of HTLCs within the remote or local
  2707  // commitment chain, and the current commitment fee rate.
  2708  //
  2709  // If mutateState is set to true, then the add height of all added HTLCs
  2710  // will be set to nextHeight, and the remove height of all removed HTLCs
  2711  // will be set to nextHeight. This should therefore only be set to true
  2712  // once for each height, and only in concert with signing a new commitment.
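        //
        // For example, if view.ourUpdates contains a Settle of one of their
        // Adds, the parent Add's HtlcIndex is placed in skipThem, the
        // balances are adjusted accordingly, and neither entry appears in
        // the returned view.
        //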
  2713  // TODO(halseth): return htlcs to mutate instead of mutating inside
  2714  // method.
  2715  func (lc *LightningChannel) evaluateHTLCView(view *htlcView, ourBalance,
  2716  	theirBalance *lnwire.MilliAtom, nextHeight uint64,
  2717  	remoteChain, mutateState bool) (*htlcView, error) {
  2718  
  2719  	// We initialize the view's fee rate to the fee rate of the unfiltered
  2720  	// view. If any fee updates are found when evaluating the view, it will
  2721  	// be updated.
  2722  	newView := &htlcView{
  2723  		feePerKB: view.feePerKB,
  2724  	}
  2725  
  2726  	// We use two maps, one for the local log and one for the remote log to
  2727  	// keep track of which entries we need to skip when creating the final
  2728  	// htlc view. We skip an entry whenever we find a settle or a timeout
  2729  	// modifying an entry.
  2730  	skipUs := make(map[uint64]struct{})
  2731  	skipThem := make(map[uint64]struct{})
  2732  
  2733  	// First we run through non-add entries in both logs, populating the
  2734  	// skip sets and mutating the current chain state (crediting balances,
  2735  	// etc) to reflect the settle/timeout entry encountered.
  2736  	for _, entry := range view.ourUpdates {
  2737  		switch entry.EntryType {
  2738  		// Skip adds for now. They will be processed below.
  2739  		case Add:
  2740  			continue
  2741  
  2742  		// Process fee updates, updating the current feePerKB.
  2743  		case FeeUpdate:
  2744  			processFeeUpdate(
  2745  				entry, nextHeight, remoteChain, mutateState,
  2746  				newView,
  2747  			)
  2748  			continue
  2749  		}
  2750  
  2751  		// If we're settling an inbound HTLC, and it hasn't been
  2752  		// processed yet, then increment our state tracking the total
  2753  		// number of atoms we've received within the channel.
  2754  		if mutateState && entry.EntryType == Settle && !remoteChain &&
  2755  			entry.removeCommitHeightLocal == 0 {
  2756  			lc.channelState.TotalMAtomsReceived += entry.Amount
  2757  		}
  2758  
  2759  		addEntry, err := lc.fetchParent(entry, remoteChain, true)
  2760  		if err != nil {
  2761  			return nil, err
  2762  		}
  2763  
  2764  		skipThem[addEntry.HtlcIndex] = struct{}{}
  2765  		processRemoveEntry(entry, ourBalance, theirBalance,
  2766  			nextHeight, remoteChain, true, mutateState)
  2767  	}
  2768  	for _, entry := range view.theirUpdates {
  2769  		switch entry.EntryType {
  2770  		// Skip adds for now. They will be processed below.
  2771  		case Add:
  2772  			continue
  2773  
  2774  		// Process fee updates, updating the current feePerKB.
  2775  		case FeeUpdate:
  2776  			processFeeUpdate(
  2777  				entry, nextHeight, remoteChain, mutateState,
  2778  				newView,
  2779  			)
  2780  			continue
  2781  		}
  2782  
  2783  		// If the remote party is settling one of our outbound HTLC's,
  2784  		// and it hasn't been processed, yet, the increment our state
  2785  		// tracking the total number of atoms we've sent within the
  2786  		// channel.
  2787  		if mutateState && entry.EntryType == Settle && !remoteChain &&
  2788  			entry.removeCommitHeightLocal == 0 {
  2789  			lc.channelState.TotalMAtomsSent += entry.Amount
  2790  		}
  2791  
  2792  		addEntry, err := lc.fetchParent(entry, remoteChain, false)
  2793  		if err != nil {
  2794  			return nil, err
  2795  		}
  2796  
  2797  		skipUs[addEntry.HtlcIndex] = struct{}{}
  2798  		processRemoveEntry(entry, ourBalance, theirBalance,
  2799  			nextHeight, remoteChain, false, mutateState)
  2800  	}
  2801  
  2802  	// Next we take a second pass through all the log entries, skipping any
  2803  	// settled HTLCs, and debiting the chain state balance due to any newly
  2804  	// added HTLCs.
  2805  	for _, entry := range view.ourUpdates {
  2806  		isAdd := entry.EntryType == Add
  2807  		if _, ok := skipUs[entry.HtlcIndex]; !isAdd || ok {
  2808  			continue
  2809  		}
  2810  
  2811  		processAddEntry(entry, ourBalance, theirBalance, nextHeight,
  2812  			remoteChain, false, mutateState)
  2813  		newView.ourUpdates = append(newView.ourUpdates, entry)
  2814  	}
  2815  	for _, entry := range view.theirUpdates {
  2816  		isAdd := entry.EntryType == Add
  2817  		if _, ok := skipThem[entry.HtlcIndex]; !isAdd || ok {
  2818  			continue
  2819  		}
  2820  
  2821  		processAddEntry(entry, ourBalance, theirBalance, nextHeight,
  2822  			remoteChain, true, mutateState)
  2823  		newView.theirUpdates = append(newView.theirUpdates, entry)
  2824  	}
  2825  
  2826  	return newView, nil
  2827  }
  2828  
  2829  // fetchParent is a helper that looks up update log parent entries in the
  2830  // appropriate log.
  2831  func (lc *LightningChannel) fetchParent(entry *PaymentDescriptor,
  2832  	remoteChain, remoteLog bool) (*PaymentDescriptor, error) {
  2833  
  2834  	var (
  2835  		updateLog *updateLog
  2836  		logName   string
  2837  	)
  2838  
  2839  	if remoteLog {
  2840  		updateLog = lc.remoteUpdateLog
  2841  		logName = "remote"
  2842  	} else {
  2843  		updateLog = lc.localUpdateLog
  2844  		logName = "local"
  2845  	}
  2846  
  2847  	addEntry := updateLog.lookupHtlc(entry.ParentIndex)
  2848  
  2849  	switch {
  2850  	// If the parent entry is not found at this point, it may have
  2851  	// been created by an old version of lnd; we return an
  2852  	// error to gracefully shut down the state machine if such an
  2853  	// entry is still in the logs.
  2854  	case addEntry == nil:
  2855  		return nil, fmt.Errorf("unable to find parent entry "+
  2856  			"%d in %v update log: %v\nUpdatelog: %v",
  2857  			entry.ParentIndex, logName,
  2858  			newLogClosure(func() string {
  2859  				return spew.Sdump(entry)
  2860  			}), newLogClosure(func() string {
  2861  				return spew.Sdump(updateLog)
  2862  			}),
  2863  		)
  2864  
  2865  	// The parent add height should never be zero at this point. If
  2866  	// that's the case we probably forgot to send a new commitment.
  2867  	case remoteChain && addEntry.addCommitHeightRemote == 0:
  2868  		return nil, fmt.Errorf("parent entry %d for update %d "+
  2869  			"had zero remote add height", entry.ParentIndex,
  2870  			entry.LogIndex)
  2871  	case !remoteChain && addEntry.addCommitHeightLocal == 0:
  2872  		return nil, fmt.Errorf("parent entry %d for update %d "+
  2873  			"had zero local add height", entry.ParentIndex,
  2874  			entry.LogIndex)
  2875  	}
  2876  
  2877  	return addEntry, nil
  2878  }
  2879  
  2880  // processAddEntry evaluates the effect of an add entry within the HTLC log.
  2881  // If the HTLC hasn't yet been committed in either chain, then the height it
  2882  // was committed is updated. Keeping track of this inclusion height allows us to
  2883  // later compact the log once the change is fully committed in both chains.
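        //
        // For example, an un-committed incoming HTLC of 1,000 MAtoms
        // evaluated against the remote chain debits theirBalance by 1,000
        // MAtoms and, when mutateState is true, stamps
        // addCommitHeightRemote with nextHeight.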
  2884  func processAddEntry(htlc *PaymentDescriptor, ourBalance, theirBalance *lnwire.MilliAtom,
  2885  	nextHeight uint64, remoteChain bool, isIncoming, mutateState bool) {
  2886  
  2887  	// If we're evaluating this entry for the remote chain (to create/view
  2888  	// a new commitment), then we may be updating the height this entry
  2889  	// was added to the chain. Otherwise, we may be updating the entry's
  2890  	// height w.r.t the local chain.
  2891  	var addHeight *uint64
  2892  	if remoteChain {
  2893  		addHeight = &htlc.addCommitHeightRemote
  2894  	} else {
  2895  		addHeight = &htlc.addCommitHeightLocal
  2896  	}
  2897  
  2898  	if *addHeight != 0 {
  2899  		return
  2900  	}
  2901  
  2902  	if isIncoming {
  2903  		// If this is a new incoming (un-committed) HTLC, then we need
  2904  		// to update their balance accordingly by subtracting the
  2905  		// amount of the HTLC, as those funds are now pending.
  2906  		*theirBalance -= htlc.Amount
  2907  	} else {
  2908  		// Similarly, we need to debit our balance if this is an
  2909  		// outgoing HTLC to reflect the pending balance.
  2910  		*ourBalance -= htlc.Amount
  2911  	}
  2912  
  2913  	if mutateState {
  2914  		*addHeight = nextHeight
  2915  	}
  2916  }
  2917  
  2918  // processRemoveEntry processes a log entry which settles or times out a
  2919  // previously added HTLC. If the removal entry has already been processed, it
  2920  // is skipped.
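        //
        // For instance, settling an incoming HTLC credits ourBalance with
        // the HTLC amount, while failing it returns the amount to
        // theirBalance; the outgoing cases mirror this.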
  2921  func processRemoveEntry(htlc *PaymentDescriptor, ourBalance,
  2922  	theirBalance *lnwire.MilliAtom, nextHeight uint64,
  2923  	remoteChain bool, isIncoming, mutateState bool) {
  2924  
  2925  	var removeHeight *uint64
  2926  	if remoteChain {
  2927  		removeHeight = &htlc.removeCommitHeightRemote
  2928  	} else {
  2929  		removeHeight = &htlc.removeCommitHeightLocal
  2930  	}
  2931  
  2932  	// Ignore any removal entries which have already been processed.
  2933  	if *removeHeight != 0 {
  2934  		return
  2935  	}
  2936  
  2937  	switch {
  2938  	// If an incoming HTLC is being settled, then this means that we've
  2939  	// received the preimage either from another subsystem, or the
  2940  	// upstream peer in the route. Therefore, we increase our balance by
  2941  	// the HTLC amount.
  2942  	case isIncoming && htlc.EntryType == Settle:
  2943  		*ourBalance += htlc.Amount
  2944  
  2945  	// Otherwise, this HTLC is being failed out, therefore the value of the
  2946  	// HTLC should return to the remote party.
  2947  	case isIncoming && (htlc.EntryType == Fail || htlc.EntryType == MalformedFail):
  2948  		*theirBalance += htlc.Amount
  2949  
  2950  	// If an outgoing HTLC is being settled, then this means that the
  2951  	// downstream party presented the preimage or learned of it via a
  2952  	// downstream peer. In either case, we credit their settled value with
  2953  	// the value of the HTLC.
  2954  	case !isIncoming && htlc.EntryType == Settle:
  2955  		*theirBalance += htlc.Amount
  2956  
  2957  	// Otherwise, one of our outgoing HTLC's has timed out, so the value of
  2958  	// the HTLC should be returned to our settled balance.
  2959  	case !isIncoming && (htlc.EntryType == Fail || htlc.EntryType == MalformedFail):
  2960  		*ourBalance += htlc.Amount
  2961  	}
  2962  
  2963  	if mutateState {
  2964  		*removeHeight = nextHeight
  2965  	}
  2966  }
  2967  
  2968  // processFeeUpdate processes a log update that updates the current commitment
  2969  // fee.
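        //
        // For example, a FeeUpdate whose Amount is 10,000,000 MAtoms sets
        // view.feePerKB to 10,000 atoms/kB via Amount.ToAtoms().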
  2970  func processFeeUpdate(feeUpdate *PaymentDescriptor, nextHeight uint64,
  2971  	remoteChain bool, mutateState bool, view *htlcView) {
  2972  
  2973  	// Fee updates are applied for all commitments after they are
  2974  	// sent/received, so we consider them being added and removed at the
  2975  	// same height.
  2976  	var addHeight *uint64
  2977  	var removeHeight *uint64
  2978  	if remoteChain {
  2979  		addHeight = &feeUpdate.addCommitHeightRemote
  2980  		removeHeight = &feeUpdate.removeCommitHeightRemote
  2981  	} else {
  2982  		addHeight = &feeUpdate.addCommitHeightLocal
  2983  		removeHeight = &feeUpdate.removeCommitHeightLocal
  2984  	}
  2985  
  2986  	if *addHeight != 0 {
  2987  		return
  2988  	}
  2989  
  2990  	// If the update wasn't already locked in, update the current fee rate
  2991  	// to reflect this update.
  2992  	view.feePerKB = chainfee.AtomPerKByte(feeUpdate.Amount.ToAtoms())
  2993  
  2994  	if mutateState {
  2995  		*addHeight = nextHeight
  2996  		*removeHeight = nextHeight
  2997  	}
  2998  }
  2999  
  3000  // genRemoteHtlcSigJobs generates a series of HTLC signature jobs for the
  3001  // sig pool, along with a channel that, if closed, will cancel any jobs
  3002  // after they have been submitted to the sigPool. This method is to be
  3003  // used when generating a new commitment for the remote party. The jobs
  3004  // generated by this method can be submitted to the sigPool to compute all
  3005  // the signatures asynchronously and in parallel.
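        //
        // A minimal usage sketch (assuming an initialized sig pool and a
        // freshly built remote commitment view):
        //
        //	sigBatch, cancelChan, err := genRemoteHtlcSigJobs(
        //		keyRing, chanType, isRemoteInitiator, leaseExpiry,
        //		localChanCfg, remoteChanCfg, remoteCommitView,
        //	)
        //	if err != nil { /* handle */ }
        //	sigPool.SubmitSignBatch(sigBatch)
        //	// Collect each job's Resp, or close(cancelChan) to abort.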
  3006  func genRemoteHtlcSigJobs(keyRing *CommitmentKeyRing,
  3007  	chanType channeldb.ChannelType, isRemoteInitiator bool,
  3008  	leaseExpiry uint32, localChanCfg, remoteChanCfg *channeldb.ChannelConfig,
  3009  	remoteCommitView *commitment) ([]SignJob, chan struct{}, error) {
  3010  
  3011  	txHash := remoteCommitView.txn.TxHash()
  3012  	dustLimit := remoteChanCfg.DustLimit
  3013  	feePerKB := remoteCommitView.feePerKB
  3014  	sigHashType := HtlcSigHashType(chanType)
  3015  
  3016  	// With the keys generated, we'll make a slice with enough capacity to
  3017  	// hold potentially all the HTLCs. The actual slice may be a bit
  3018  	// smaller than its total capacity, as some HTLCs may be dust.
  3019  	numSigs := (len(remoteCommitView.incomingHTLCs) +
  3020  		len(remoteCommitView.outgoingHTLCs))
  3021  	sigBatch := make([]SignJob, 0, numSigs)
  3022  
  3023  	var err error
  3024  	cancelChan := make(chan struct{})
  3025  
  3026  	// For each outgoing and incoming HTLC, if the HTLC isn't considered a
  3027  	// dust output after taking into account second-level HTLC fees, then a
  3028  	// sigJob will be generated and appended to the current batch.
  3029  	for _, htlc := range remoteCommitView.incomingHTLCs {
  3030  		if HtlcIsDust(
  3031  			chanType, true, false, feePerKB,
  3032  			htlc.Amount.ToAtoms(), dustLimit,
  3033  		) {
  3034  			continue
  3035  		}
  3036  
  3037  		// If the HTLC isn't dust, then we'll create an empty sign job
  3038  		// to add to the batch momentarily.
  3039  		sigJob := SignJob{}
  3040  		sigJob.Cancel = cancelChan
  3041  		sigJob.Resp = make(chan SignJobResp, 1)
  3042  
  3043  		// As this is an incoming HTLC and we're signing the commitment
  3044  		// transaction of the remote node, we'll need to generate an
  3045  		// HTLC timeout transaction for them. The output of the timeout
  3046  		// transaction needs to account for fees, so we'll compute the
  3047  		// required fee and output now.
  3048  		htlcFee := HtlcTimeoutFee(chanType, feePerKB)
  3049  		outputAmt := htlc.Amount.ToAtoms() - htlcFee
  3050  
  3051  		// With the fee calculated, we can properly create the HTLC
  3052  		// timeout transaction using the HTLC amount minus the fee.
  3053  		op := wire.OutPoint{
  3054  			Hash:  txHash,
  3055  			Index: uint32(htlc.remoteOutputIndex),
  3056  			Tree:  wire.TxTreeRegular,
  3057  		}
  3058  		sigJob.Tx, err = CreateHtlcTimeoutTx(
  3059  			chanType, isRemoteInitiator, op, outputAmt,
  3060  			htlc.Timeout, uint32(remoteChanCfg.CsvDelay),
  3061  			leaseExpiry, keyRing.RevocationKey, keyRing.ToLocalKey,
  3062  		)
  3063  		if err != nil {
  3064  			return nil, nil, err
  3065  		}
  3066  
  3067  		// Finally, we'll generate a sign descriptor to generate a
  3068  		// signature to give to the remote party for this commitment
  3069  		// transaction. Note we use the raw HTLC amount.
  3070  		txOut := remoteCommitView.txn.TxOut[htlc.remoteOutputIndex]
  3071  		sigJob.SignDesc = input.SignDescriptor{
  3072  			KeyDesc:       localChanCfg.HtlcBasePoint,
  3073  			SingleTweak:   keyRing.LocalHtlcKeyTweak,
  3074  			WitnessScript: htlc.theirWitnessScript,
  3075  			Output:        txOut,
  3076  			HashType:      sigHashType,
  3077  			InputIndex:    0,
  3078  		}
  3079  		sigJob.OutputIndex = htlc.remoteOutputIndex
  3080  
  3081  		sigBatch = append(sigBatch, sigJob)
  3082  	}
  3083  	for _, htlc := range remoteCommitView.outgoingHTLCs {
  3084  		if HtlcIsDust(
  3085  			chanType, false, false, feePerKB,
  3086  			htlc.Amount.ToAtoms(), dustLimit,
  3087  		) {
  3088  			continue
  3089  		}
  3090  
  3091  		sigJob := SignJob{}
  3092  		sigJob.Cancel = cancelChan
  3093  		sigJob.Resp = make(chan SignJobResp, 1)
  3094  
  3095  		// As this is an outgoing HTLC and we're signing the commitment
  3096  		// transaction of the remote node, we'll need to generate an
  3097  		// HTLC success transaction for them. The output of the success
  3098  		// transaction needs to account for fees, so we'll compute the
  3099  		// required fee and output now.
  3100  		htlcFee := HtlcSuccessFee(chanType, feePerKB)
  3101  		outputAmt := htlc.Amount.ToAtoms() - htlcFee
  3102  
  3103  		// With the proper output amount calculated, we can now
  3104  		// generate the success transaction using the remote party's
  3105  		// CSV delay.
  3106  		op := wire.OutPoint{
  3107  			Hash:  txHash,
  3108  			Index: uint32(htlc.remoteOutputIndex),
  3109  			Tree:  wire.TxTreeRegular,
  3110  		}
  3111  		sigJob.Tx, err = CreateHtlcSuccessTx(
  3112  			chanType, isRemoteInitiator, op, outputAmt,
  3113  			uint32(remoteChanCfg.CsvDelay), leaseExpiry,
  3114  			keyRing.RevocationKey, keyRing.ToLocalKey,
  3115  		)
  3116  		if err != nil {
  3117  			return nil, nil, err
  3118  		}
  3119  
  3120  		// Finally, we'll generate a sign descriptor to generate a
  3121  		// signature to give to the remote party for this commitment
  3122  		// transaction. Note we use the raw HTLC amount.
  3123  		txOut := remoteCommitView.txn.TxOut[htlc.remoteOutputIndex]
  3124  		sigJob.SignDesc = input.SignDescriptor{
  3125  			KeyDesc:       localChanCfg.HtlcBasePoint,
  3126  			SingleTweak:   keyRing.LocalHtlcKeyTweak,
  3127  			WitnessScript: htlc.theirWitnessScript,
  3128  			Output:        txOut,
  3129  			HashType:      sigHashType,
  3130  			InputIndex:    0,
  3131  		}
  3132  		sigJob.OutputIndex = htlc.remoteOutputIndex
  3133  
  3134  		sigBatch = append(sigBatch, sigJob)
  3135  	}
  3136  
  3137  	return sigBatch, cancelChan, nil
  3138  }
  3139  
  3140  // createCommitDiff will create a commit diff given a new pending commitment
  3141  // for the remote party and the necessary signatures for the remote party to
  3142  // validate this new state. This function is called right before sending the
  3143  // new commitment to the remote party. The commit diff returned contains all
  3144  // information necessary for retransmission.
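        //
        // For example, an Add committed at newCommit.height is recorded as
        // an lnwire.UpdateAddHTLC in LogUpdates, while a Settle becomes an
        // lnwire.UpdateFulfillHTLC referencing its parent HTLC's index.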
  3145  func (lc *LightningChannel) createCommitDiff(
  3146  	newCommit *commitment, commitSig lnwire.Sig,
  3147  	htlcSigs []lnwire.Sig) (*channeldb.CommitDiff, error) {
  3148  
  3149  	// First, we need to convert the funding outpoint into the ID that's
  3150  	// used on the wire to identify this channel. We'll use this shortly
  3151  	// when recording the exact CommitSig message that we'll be sending
  3152  	// out.
  3153  	chanID := lnwire.NewChanIDFromOutPoint(&lc.channelState.FundingOutpoint)
  3154  
  3155  	var (
  3156  		logUpdates        []channeldb.LogUpdate
  3157  		ackAddRefs        []channeldb.AddRef
  3158  		settleFailRefs    []channeldb.SettleFailRef
  3159  		openCircuitKeys   []channeldb.CircuitKey
  3160  		closedCircuitKeys []channeldb.CircuitKey
  3161  	)
  3162  
  3163  	// We'll now run through our local update log to locate the items which
  3164  	// were only just committed within this pending state. This will be the
  3165  	// set of items we need to retransmit if we reconnect and find that
  3166  	// they didn't process this new state fully.
  3167  	for e := lc.localUpdateLog.Front(); e != nil; e = e.Next() {
  3168  		pd := e.Value.(*PaymentDescriptor)
  3169  
  3170  		// If this entry wasn't committed at the exact height of this
  3171  		// remote commitment, then we'll skip it as it was already
  3172  		// lingering in the log.
  3173  		if pd.addCommitHeightRemote != newCommit.height &&
  3174  			pd.removeCommitHeightRemote != newCommit.height {
  3175  
  3176  			continue
  3177  		}
  3178  
  3179  		// Knowing that this update is a part of this new commitment,
  3180  		// we'll create a log update and note its index in the log so
  3181  		// we can later restore it properly if a restart occurs.
  3182  		logUpdate := channeldb.LogUpdate{
  3183  			LogIndex: pd.LogIndex,
  3184  		}
  3185  
  3186  		// We'll map the type of the PaymentDescriptor to one of the
  3187  		// five messages that it corresponds to. With this set of
  3188  		// messages obtained, we can simply read from disk and re-send
  3189  		// them in the case of a needed channel sync.
  3190  		switch pd.EntryType {
  3191  		case Add:
  3192  			htlc := &lnwire.UpdateAddHTLC{
  3193  				ChanID:      chanID,
  3194  				ID:          pd.HtlcIndex,
  3195  				Amount:      pd.Amount,
  3196  				Expiry:      pd.Timeout,
  3197  				PaymentHash: pd.RHash,
  3198  			}
  3199  			copy(htlc.OnionBlob[:], pd.OnionBlob)
  3200  			logUpdate.UpdateMsg = htlc
  3201  
  3202  			// Gather any references for circuits opened by this Add
  3203  			// HTLC.
  3204  			if pd.OpenCircuitKey != nil {
  3205  				openCircuitKeys = append(openCircuitKeys,
  3206  					*pd.OpenCircuitKey)
  3207  			}
  3208  
  3209  			logUpdates = append(logUpdates, logUpdate)
  3210  
  3211  			// Short circuit here since an add should not have any
  3212  			// of the references gathered in the case of settles,
  3213  			// fails or malformed fails.
  3214  			continue
  3215  
  3216  		case Settle:
  3217  			logUpdate.UpdateMsg = &lnwire.UpdateFulfillHTLC{
  3218  				ChanID:          chanID,
  3219  				ID:              pd.ParentIndex,
  3220  				PaymentPreimage: pd.RPreimage,
  3221  			}
  3222  
  3223  		case Fail:
  3224  			logUpdate.UpdateMsg = &lnwire.UpdateFailHTLC{
  3225  				ChanID: chanID,
  3226  				ID:     pd.ParentIndex,
  3227  				Reason: pd.FailReason,
  3228  			}
  3229  
  3230  		case MalformedFail:
  3231  			logUpdate.UpdateMsg = &lnwire.UpdateFailMalformedHTLC{
  3232  				ChanID:       chanID,
  3233  				ID:           pd.ParentIndex,
  3234  				ShaOnionBlob: pd.ShaOnionBlob,
  3235  				FailureCode:  pd.FailCode,
  3236  			}
  3237  
  3238  		case FeeUpdate:
  3239  			// The Amount field holds the fee rate denominated in
  3240  			// milli-atoms. Since fee rates are only denominated in
  3241  			// atoms/kB, we can convert it without loss of precision.
  3242  			logUpdate.UpdateMsg = &lnwire.UpdateFee{
  3243  				ChanID:   chanID,
  3244  				FeePerKB: uint32(pd.Amount.ToAtoms()),
  3245  			}
  3246  		}
  3247  
  3248  		// Gather the fwd pkg references from any settle or fail
  3249  		// packets, if they exist.
  3250  		if pd.SourceRef != nil {
  3251  			ackAddRefs = append(ackAddRefs, *pd.SourceRef)
  3252  		}
  3253  		if pd.DestRef != nil {
  3254  			settleFailRefs = append(settleFailRefs, *pd.DestRef)
  3255  		}
  3256  		if pd.ClosedCircuitKey != nil {
  3257  			closedCircuitKeys = append(closedCircuitKeys,
  3258  				*pd.ClosedCircuitKey)
  3259  		}
  3260  
  3261  		logUpdates = append(logUpdates, logUpdate)
  3262  	}
  3263  
  3264  	// With the set of log updates mapped into wire messages, we'll now
  3265  	// convert the in-memory commit into a format suitable for writing to
  3266  	// disk.
  3267  	diskCommit := newCommit.toDiskCommit(false)
  3268  
  3269  	return &channeldb.CommitDiff{
  3270  		Commitment: *diskCommit,
  3271  		CommitSig: &lnwire.CommitSig{
  3272  			ChanID: lnwire.NewChanIDFromOutPoint(
  3273  				&lc.channelState.FundingOutpoint,
  3274  			),
  3275  			CommitSig: commitSig,
  3276  			HtlcSigs:  htlcSigs,
  3277  		},
  3278  		LogUpdates:        logUpdates,
  3279  		OpenedCircuitKeys: openCircuitKeys,
  3280  		ClosedCircuitKeys: closedCircuitKeys,
  3281  		AddAcks:           ackAddRefs,
  3282  		SettleFailAcks:    settleFailRefs,
  3283  	}, nil
  3284  }
  3285  
  3286  // getUnsignedAckedUpdates returns all remote log updates that we haven't
  3287  // signed for yet ourselves.
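        //
        // Concretely, only remote updates with
        // lastRemoteCommitted <= LogIndex < lastLocalCommitted are
        // returned: updates already locked into our local commitment, but
        // not yet covered by a signature we sent for their chain.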
  3288  func (lc *LightningChannel) getUnsignedAckedUpdates() []channeldb.LogUpdate {
  3289  	// First, we need to convert the funding outpoint into the ID that's
  3290  	// used on the wire to identify this channel.
  3291  	chanID := lnwire.NewChanIDFromOutPoint(&lc.channelState.FundingOutpoint)
  3292  
  3293  	// Fetch the last remote update that we have signed for.
  3294  	lastRemoteCommitted := lc.remoteCommitChain.tail().theirMessageIndex
  3295  
  3296  	// Fetch the last remote update that we have acked.
  3297  	lastLocalCommitted := lc.localCommitChain.tail().theirMessageIndex
  3298  
  3299  	// We'll now run through the remote update log to locate the items that
  3300  	// we haven't signed for yet. This will be the set of items we need to
  3301  	// restore if we reconnect in order to produce the signature that the
  3302  	// remote party expects.
  3303  	var logUpdates []channeldb.LogUpdate
  3304  	for e := lc.remoteUpdateLog.Front(); e != nil; e = e.Next() {
  3305  		pd := e.Value.(*PaymentDescriptor)
  3306  
  3307  		// Skip all remote updates that we have already included in our
  3308  		// commit chain.
  3309  		if pd.LogIndex < lastRemoteCommitted {
  3310  			continue
  3311  		}
  3312  
  3313  		// Skip all remote updates that we haven't acked yet. At the
  3314  		// moment this function is called, there shouldn't be any, but
  3315  		// we check it anyway to make this function more generally
  3316  		// usable.
  3317  		if pd.LogIndex >= lastLocalCommitted {
  3318  			continue
  3319  		}
  3320  
  3321  		logUpdate := channeldb.LogUpdate{
  3322  			LogIndex: pd.LogIndex,
  3323  		}
  3324  
  3325  		// We'll map the type of the PaymentDescriptor to one of the
  3326  		// five messages that it corresponds to.
  3327  		switch pd.EntryType {
  3328  		case Add:
  3329  			htlc := &lnwire.UpdateAddHTLC{
  3330  				ChanID:      chanID,
  3331  				ID:          pd.HtlcIndex,
  3332  				Amount:      pd.Amount,
  3333  				Expiry:      pd.Timeout,
  3334  				PaymentHash: pd.RHash,
  3335  			}
  3336  			copy(htlc.OnionBlob[:], pd.OnionBlob)
  3337  			logUpdate.UpdateMsg = htlc
  3338  
  3339  		case Settle:
  3340  			logUpdate.UpdateMsg = &lnwire.UpdateFulfillHTLC{
  3341  				ChanID:          chanID,
  3342  				ID:              pd.ParentIndex,
  3343  				PaymentPreimage: pd.RPreimage,
  3344  			}
  3345  
  3346  		case Fail:
  3347  			logUpdate.UpdateMsg = &lnwire.UpdateFailHTLC{
  3348  				ChanID: chanID,
  3349  				ID:     pd.ParentIndex,
  3350  				Reason: pd.FailReason,
  3351  			}
  3352  
  3353  		case MalformedFail:
  3354  			logUpdate.UpdateMsg = &lnwire.UpdateFailMalformedHTLC{
  3355  				ChanID:       chanID,
  3356  				ID:           pd.ParentIndex,
  3357  				ShaOnionBlob: pd.ShaOnionBlob,
  3358  				FailureCode:  pd.FailCode,
  3359  			}
  3360  
  3361  		case FeeUpdate:
  3362  			// The Amount field holds the fee rate denominated in
  3363  			// milli-atoms. Since fee rates are only denominated in
  3364  			// atoms/kB, we can convert it without loss of precision.
  3365  			logUpdate.UpdateMsg = &lnwire.UpdateFee{
  3366  				ChanID:   chanID,
  3367  				FeePerKB: uint32(pd.Amount.ToAtoms()),
  3368  			}
  3369  		}
  3370  
  3371  		logUpdates = append(logUpdates, logUpdate)
  3372  	}
  3373  	return logUpdates
  3374  }
  3375  
  3376  // validateCommitmentSanity is used to validate the current state of the
  3377  // commitment transaction in terms of the ChannelConstraints that we and our
  3378  // remote peer agreed upon during the funding workflow. The
  3379  // predict[Our|Their]Add parameters should be set to a valid
  3380  // PaymentDescriptor if we are validating the state that would result
  3381  // from adding a new HTLC, or nil otherwise.
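        //
        // For example, when the remote party proposes a new incoming HTLC,
        // this is called with predictTheirAdd set so that the prospective
        // commitment (including that HTLC) is checked against the reserve,
        // max-in-flight and minimum-HTLC constraints before acceptance.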
  3382  func (lc *LightningChannel) validateCommitmentSanity(theirLogCounter,
  3383  	ourLogCounter uint64, remoteChain bool,
  3384  	predictOurAdd, predictTheirAdd *PaymentDescriptor) error {
  3385  
  3386  	// Fetch all updates not committed.
  3387  	view := lc.fetchHTLCView(theirLogCounter, ourLogCounter)
  3388  
  3389  	// If we are checking if we can add a new HTLC, we add this to the
  3390  	// appropriate update log, in order to validate the sanity of the
  3391  	// commitment resulting from _actually adding_ this HTLC to the state.
  3392  	if predictOurAdd != nil {
  3393  		view.ourUpdates = append(view.ourUpdates, predictOurAdd)
  3394  	}
  3395  	if predictTheirAdd != nil {
  3396  		view.theirUpdates = append(view.theirUpdates, predictTheirAdd)
  3397  	}
  3398  
  3399  	commitChain := lc.localCommitChain
  3400  	if remoteChain {
  3401  		commitChain = lc.remoteCommitChain
  3402  	}
  3403  	ourInitialBalance := commitChain.tip().ourBalance
  3404  	theirInitialBalance := commitChain.tip().theirBalance
  3405  
  3406  	ourBalance, theirBalance, commitSize, filteredView, err := lc.computeView(
  3407  		view, remoteChain, false,
  3408  	)
  3409  	if err != nil {
  3410  		return err
  3411  	}
  3412  	feePerKB := filteredView.feePerKB
  3413  
  3414  	// Calculate the commitment fee, and subtract it from the initiator's
  3415  	// balance.
  3416  	commitFee := feePerKB.FeeForSize(commitSize)
  3417  	commitFeeMAtoms := lnwire.NewMAtomsFromAtoms(commitFee)
  3418  	if lc.channelState.IsInitiator {
  3419  		ourBalance -= commitFeeMAtoms
  3420  	} else {
  3421  		theirBalance -= commitFeeMAtoms
  3422  	}
  3423  
  3424  	// As a quick sanity check, we'll ensure that if we interpret the
  3425  	// balances as signed integers, they haven't dipped down below zero. If
  3426  	// they have, then this indicates that a party doesn't have sufficient
  3427  	// balance to satisfy the final evaluated HTLC's.
  3428  	switch {
  3429  	case int64(ourBalance) < 0:
  3430  		return ErrBelowChanReserve
  3431  	case int64(theirBalance) < 0:
  3432  		return ErrBelowChanReserve
  3433  	}
  3434  
  3435  	// Ensure that the fee being applied is enough to be relayed across the
  3436  	// network in a reasonable time frame.
  3437  	if feePerKB < chainfee.FeePerKBFloor && !disableFeeFloorCheck {
  3438  		return fmt.Errorf("commitment fee per kb %v below fee floor %v",
  3439  			feePerKB, chainfee.FeePerKBFloor)
  3440  	}
  3441  
  3442  	// If the added HTLCs will decrease the balance, make sure they won't
  3443  	// dip the local and remote balances below the channel reserves.
  3444  	switch {
  3445  	case ourBalance < ourInitialBalance &&
  3446  		ourBalance < lnwire.NewMAtomsFromAtoms(
  3447  			lc.channelState.LocalChanCfg.ChanReserve):
  3448  
  3449  		return ErrBelowChanReserve
  3450  	case theirBalance < theirInitialBalance &&
  3451  		theirBalance < lnwire.NewMAtomsFromAtoms(
  3452  			lc.channelState.RemoteChanCfg.ChanReserve):
  3453  
  3454  		return ErrBelowChanReserve
  3455  	}
  3456  
  3457  	// validateUpdates takes a set of updates and validates them against
  3458  	// the passed channel constraints.
  3459  	validateUpdates := func(updates []*PaymentDescriptor,
  3460  		constraints *channeldb.ChannelConfig) error {
  3461  
  3462  		// We keep track of the number of HTLCs in flight for the
  3463  		// commitment, and the amount in flight.
  3464  		var numInFlight uint16
  3465  		var amtInFlight lnwire.MilliAtom
  3466  
  3467  		// Go through all updates, checking that they don't violate the
  3468  		// channel constraints.
  3469  		for _, entry := range updates {
  3470  			if entry.EntryType == Add {
  3471  				// An HTLC is being added, this will add to the
  3472  				// number and amount in flight.
  3473  				amtInFlight += entry.Amount
  3474  				numInFlight++
  3475  
  3476  				// Check that the HTLC amount is positive.
  3477  				if entry.Amount == 0 {
  3478  					return ErrInvalidHTLCAmt
  3479  				}
  3480  
  3481  				// Check that the value of the HTLC they added
  3482  				// is above our minimum.
  3483  				if entry.Amount < constraints.MinHTLC {
  3484  					return ErrBelowMinHTLC
  3485  				}
  3486  			}
  3487  		}
  3488  
  3489  		// Now that we know the total value of added HTLCs, we check
  3490  		// that it satisfies the MaxPendingAmount constraint.
  3491  		if amtInFlight > constraints.MaxPendingAmount {
  3492  			return ErrMaxPendingAmount
  3493  		}
  3494  
  3495  		// In this step, we verify that the total number of active
  3496  		// HTLCs does not exceed the constraint of the maximum number
  3497  		// of HTLCs in flight.
  3498  		if numInFlight > constraints.MaxAcceptedHtlcs {
  3499  			return ErrMaxHTLCNumber
  3500  		}
  3501  
  3502  		return nil
  3503  	}
  3504  
  3505  	// First, check that the remote updates won't violate the remote's
  3506  	// channel constraints.
  3507  	err = validateUpdates(
  3508  		filteredView.theirUpdates, &lc.channelState.RemoteChanCfg,
  3509  	)
  3510  	if err != nil {
  3511  		return err
  3512  	}
  3513  
  3514  	// Second, check that our updates won't violate our own channel
  3515  	// constraints.
  3516  	err = validateUpdates(
  3517  		filteredView.ourUpdates, &lc.channelState.LocalChanCfg,
  3518  	)
  3519  	if err != nil {
  3520  		return err
  3521  	}
  3522  
  3523  	return nil
  3524  }
  3525  
  3526  // SignNextCommitment signs a new commitment which includes any previous
  3527  // unsettled HTLCs, any new HTLCs, and any modifications to prior HTLCs
  3528  // committed in previous commitment updates. Signing a new commitment
  3529  // decrements the available revocation window by 1. After a successful method
  3530  // call, the remote party's commitment chain is extended by a new commitment
  3531  // which includes all updates to the HTLC log prior to this method invocation.
  3532  // The first return parameter is the signature for the commitment transaction
  3533  // itself, while the second parameter is a slice of all HTLC signatures (if
  3534  // any). The HTLC signatures are sorted according to the BIP 69 order of the
  3535  // HTLC's on the commitment transaction. Finally, the new set of pending HTLCs
  3536  // for the remote party's commitment are also returned.
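        //
        // A minimal caller sketch:
        //
        //	sig, htlcSigs, htlcs, err := lc.SignNextCommitment()
        //	if err != nil { /* handle, e.g. ErrNoWindow */ }
        //	// Send an lnwire.CommitSig carrying sig and htlcSigs to the
        //	// remote peer; htlcs describes the resulting pending HTLC set.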
  3537  func (lc *LightningChannel) SignNextCommitment() (lnwire.Sig, []lnwire.Sig, []channeldb.HTLC, error) {
  3538  
  3539  	lc.Lock()
  3540  	defer lc.Unlock()
  3541  
  3542  	// Check for empty commit sig. This should never happen, but we don't
  3543  	// dare to fail hard here. We assume peers can deal with the empty sig
  3544  	// and continue channel operation. We log an error so that the bug
  3545  	// causing this can be tracked down.
  3546  	if !lc.oweCommitment(true) {
  3547  		lc.log.Errorf("sending empty commit sig")
  3548  	}
  3549  
  3550  	var (
  3551  		sig      lnwire.Sig
  3552  		htlcSigs []lnwire.Sig
  3553  	)
  3554  
  3555  	// If we're awaiting an ACK to a commitment signature, or if we
  3556  	// don't yet have the initial next revocation point of the remote
  3557  	// party, then we're unable to create new states. Each time we create a
  3558  	// new state, we consume a prior revocation point.
  3559  	commitPoint := lc.channelState.RemoteNextRevocation
  3560  	if lc.remoteCommitChain.hasUnackedCommitment() || commitPoint == nil {
  3561  
  3562  		return sig, htlcSigs, nil, ErrNoWindow
  3563  	}
  3564  
  3565  	// Determine the last update on the remote log that has been locked in.
  3566  	remoteACKedIndex := lc.localCommitChain.tail().theirMessageIndex
  3567  	remoteHtlcIndex := lc.localCommitChain.tail().theirHtlcIndex
  3568  
  3569  	// Before we extend this new commitment to the remote commitment chain,
  3570  	// ensure that we aren't violating any of the constraints the remote
  3571  	// party set up when we initially set up the channel. If we are, then
  3572  	// we'll abort this state transition.
  3573  	err := lc.validateCommitmentSanity(
  3574  		remoteACKedIndex, lc.localUpdateLog.logIndex, true, nil, nil,
  3575  	)
  3576  	if err != nil {
  3577  		return sig, htlcSigs, nil, err
  3578  	}
  3579  
  3580  	// Grab the next commitment point for the remote party. This will be
  3581  	// used within fetchCommitmentView to derive all the keys necessary to
  3582  	// construct the commitment state.
  3583  	keyRing := DeriveCommitmentKeys(
  3584  		commitPoint, false, lc.channelState.ChanType,
  3585  		&lc.channelState.LocalChanCfg, &lc.channelState.RemoteChanCfg,
  3586  	)
  3587  
  3588  	// Create a new commitment view which will calculate the evaluated
  3589  	// state of the remote node's new commitment including our latest added
  3590  	// HTLCs. The view includes the latest balances for both sides on the
  3591  	// remote node's chain, and also updates the addition height of any new
  3592  	// HTLC log entries. When creating a new remote view, we include
  3593  	// _all_ of our changes (pending or committed) but only the remote
  3594  	// node's changes up to the last change we've ACK'd.
  3595  	newCommitView, err := lc.fetchCommitmentView(
  3596  		true, lc.localUpdateLog.logIndex, lc.localUpdateLog.htlcCounter,
  3597  		remoteACKedIndex, remoteHtlcIndex, keyRing,
  3598  	)
  3599  	if err != nil {
  3600  		return sig, htlcSigs, nil, err
  3601  	}
  3602  
  3603  	lc.log.Tracef("extending remote chain to height %v, "+
  3604  		"local_log=%v, remote_log=%v",
  3605  		newCommitView.height,
  3606  		lc.localUpdateLog.logIndex, remoteACKedIndex)
  3607  
  3608  	lc.log.Tracef("remote chain: our_balance=%v, "+
  3609  		"their_balance=%v, commit_tx: %v",
  3610  		newCommitView.ourBalance,
  3611  		newCommitView.theirBalance,
  3612  		newLogClosure(func() string {
  3613  			return spew.Sdump(cacheCommitmentTxHash(newCommitView).txn)
  3614  		}),
  3615  	)
  3616  
  3617  	// With the commitment view constructed, if there are any HTLC's, we'll
  3618  	// need to generate signatures of each of them for the remote party's
  3619  	// commitment state. We do so in two phases: first we generate and
  3620  	// submit the set of signature jobs to the worker pool.
  3621  	var leaseExpiry uint32
  3622  	if lc.channelState.ChanType.HasLeaseExpiration() {
  3623  		leaseExpiry = lc.channelState.ThawHeight
  3624  	}
  3625  	sigBatch, cancelChan, err := genRemoteHtlcSigJobs(
  3626  		keyRing, lc.channelState.ChanType, !lc.channelState.IsInitiator,
  3627  		leaseExpiry, &lc.channelState.LocalChanCfg,
  3628  		&lc.channelState.RemoteChanCfg, newCommitView,
  3629  	)
  3630  	if err != nil {
  3631  		return sig, htlcSigs, nil, err
  3632  	}
  3633  	lc.sigPool.SubmitSignBatch(sigBatch)
  3634  
  3635  	// While the jobs are being carried out, we'll sign their version of
  3636  	// the new commitment transaction while we're waiting for the rest of
  3637  	// the HTLC signatures to be processed.
  3638  	rawSig, err := lc.Signer.SignOutputRaw(newCommitView.txn, lc.signDesc)
  3639  	if err != nil {
  3640  		close(cancelChan)
  3641  		return sig, htlcSigs, nil, err
  3642  	}
  3643  	sig, err = lnwire.NewSigFromSignature(rawSig)
  3644  	if err != nil {
  3645  		close(cancelChan)
  3646  		return sig, htlcSigs, nil, err
  3647  	}
  3648  
  3649  	// We'll need to send over the signatures to the remote party in the
  3650  	// order as they appear on the commitment transaction after BIP 69
  3651  	// sorting.
  3652  	sort.Slice(sigBatch, func(i, j int) bool {
  3653  		return sigBatch[i].OutputIndex < sigBatch[j].OutputIndex
  3654  	})
  3655  
  3656  	// With the jobs sorted, we'll now iterate through all the responses to
  3657  	// gather each of the signatures in order.
  3658  	htlcSigs = make([]lnwire.Sig, 0, len(sigBatch))
  3659  	for _, htlcSigJob := range sigBatch {
  3660  		jobResp := <-htlcSigJob.Resp
  3661  
  3662  		// If an error occurred, then we'll cancel any other active
  3663  		// jobs.
  3664  		if jobResp.Err != nil {
  3665  			close(cancelChan)
  3666  			return sig, htlcSigs, nil, jobResp.Err
  3667  		}
  3668  
  3669  		htlcSigs = append(htlcSigs, jobResp.Sig)
  3670  	}
  3671  
  3672  	// As we're about to propose a new commitment state for the remote
  3673  	// party, we'll write this pending state to disk before we exit, so we
  3674  	// can retransmit it if necessary.
  3675  	commitDiff, err := lc.createCommitDiff(newCommitView, sig, htlcSigs)
  3676  	if err != nil {
  3677  		return sig, htlcSigs, nil, err
  3678  	}
  3679  	err = lc.channelState.AppendRemoteCommitChain(commitDiff)
  3680  	if err != nil {
  3681  		return sig, htlcSigs, nil, err
  3682  	}
  3683  
  3684  	// TODO(roasbeef): check that one eclair bug
  3685  	//  * need to retransmit on first state still?
  3686  	//  * after initial reconnect
  3687  
  3688  	// Extend the remote commitment chain by one with the addition of our
  3689  	// latest commitment update.
  3690  	lc.remoteCommitChain.addCommitment(newCommitView)
  3691  
  3692  	return sig, htlcSigs, commitDiff.Commitment.Htlcs, nil
  3693  }
  3694  
  3695  // ProcessChanSyncMsg processes a ChannelReestablish message sent by the remote
  3696  // connection upon re-establishment of our connection with them. This method
  3697  // will return a single message if we are currently out of sync, otherwise a
  3698  // nil lnwire.Message will be returned. If it is decided that our level of
  3699  // de-synchronization is irreconcilable, then an error indicating the issue
  3700  // will be returned. In the case that an error is returned, the channel should
  3701  // be force closed, as we cannot continue updates.
  3702  //
  3703  // One of two message sets will be returned:
  3704  //
  3705  //   - CommitSig+Updates: if we have a pending remote commit which they claim to
  3706  //     have not received
  3707  //   - RevokeAndAck: if we sent a revocation message that they claim to have
  3708  //     not received
  3709  //
  3710  // If we detect a scenario where we need to send a CommitSig+Updates, this
  3711  // method also returns two sets of channeldb.CircuitKeys identifying the circuits
  3712  // that were opened and closed, respectively, as a result of signing the
  3713  // previous commitment txn. This allows the link to clear its mailbox of those
  3714  // circuits in case they are still in memory, and ensure the switch's circuit
  3715  // map has been updated by deleting the closed circuits.
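        //
        // A minimal caller sketch (upon reconnection):
        //
        //	msgs, opened, closed, err := lc.ProcessChanSyncMsg(remoteMsg)
        //	if err != nil { /* channel must be failed / force closed */ }
        //	for _, m := range msgs { /* re-send m to the peer */ }
        //	// opened/closed identify circuits affected by the resent state.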
  3716  func (lc *LightningChannel) ProcessChanSyncMsg(
  3717  	msg *lnwire.ChannelReestablish) ([]lnwire.Message, []channeldb.CircuitKey,
  3718  	[]channeldb.CircuitKey, error) {
  3719  
  3720  	// Now we'll examine the state we have, vs what was contained in the
  3721  	// chain sync message. If we're de-synchronized, then we'll send a
  3722  	// batch of messages which when applied will kick start the chain
  3723  	// resync.
  3724  	var (
  3725  		updates        []lnwire.Message
  3726  		openedCircuits []channeldb.CircuitKey
  3727  		closedCircuits []channeldb.CircuitKey
  3728  	)
  3729  
  3730  	// If the remote party included the optional fields, then we'll verify
  3731  	// their correctness first, as it will influence our decisions below.
  3732  	hasRecoveryOptions := msg.LocalUnrevokedCommitPoint != nil
  3733  	if hasRecoveryOptions && msg.RemoteCommitTailHeight != 0 {
  3734  		// We'll check that they've really sent a valid commit
  3735  		// secret from our shachain for our prior height, but only if
  3736  		// this isn't the first state.
  3737  		heightSecret, err := lc.channelState.RevocationProducer.AtIndex(
  3738  			msg.RemoteCommitTailHeight - 1,
  3739  		)
  3740  		if err != nil {
  3741  			return nil, nil, nil, err
  3742  		}
  3743  		commitSecretCorrect := bytes.Equal(
  3744  			heightSecret[:], msg.LastRemoteCommitSecret[:],
  3745  		)
  3746  
  3747  		// If the commit secret they sent is incorrect then we'll fail
  3748  		// the channel as the remote node has an inconsistent state.
  3749  		if !commitSecretCorrect {
  3750  			// In this case, we'll return an error to indicate the
  3751  			// remote node sent us the wrong values. This will let
  3752  			// the caller act accordingly.
  3753  			lc.log.Errorf("sync failed: remote provided invalid " +
  3754  				"commit secret!")
  3755  			return nil, nil, nil, ErrInvalidLastCommitSecret
  3756  		}
  3757  	}
  3758  
  3759  	// If we detect that this is a restored channel, then we can skip a
  3760  	// portion of the verification, as we already know that we're unable to
  3761  	// proceed with any updates.
  3762  	isRestoredChan := lc.channelState.HasChanStatus(
  3763  		channeldb.ChanStatusRestored,
  3764  	)
  3765  
  3766  	// Take note of our current commit chain heights before we begin adding
  3767  	// more to them.
  3768  	var (
  3769  		localTailHeight  = lc.localCommitChain.tail().height
  3770  		remoteTailHeight = lc.remoteCommitChain.tail().height
  3771  		remoteTipHeight  = lc.remoteCommitChain.tip().height
  3772  	)
  3773  
  3774  	// We'll now check that their view of our local chain is up-to-date.
  3775  	// This means checking that the height they report for our local
  3776  	// chain tail matches our own. Note that the tail and tip height will
  3777  	// always be the same for the local chain at this stage, as we won't
  3778  	// store any received commitment to disk before it is ACKed.
  3779  	switch {
  3780  
  3781  	// If their reported height for our local chain tail is ahead of our
  3782  	// view, then we're behind!
  3783  	case msg.RemoteCommitTailHeight > localTailHeight || isRestoredChan:
  3784  		lc.log.Errorf("sync failed with local data loss: remote "+
  3785  			"believes our tail height is %v, while we have %v!",
  3786  			msg.RemoteCommitTailHeight, localTailHeight)
  3787  
  3788  		if isRestoredChan {
  3789  			lc.log.Warnf("detected restored channel, triggering DLP")
  3790  		}
  3791  
  3792  		// We must check that we had recovery options to ensure the
  3793  		// commitment secret matched up, and the remote is just not
  3794  		// lying about its height.
  3795  		if !hasRecoveryOptions {
  3796  			// At this point the remote is either lying about
  3797  			// its height, or we are actually behind but the remote
  3798  			// doesn't support data loss protection. In either case
  3799  			// it is not safe for us to keep using the channel, so
  3800  			// we mark it borked and fail the channel.
  3801  			lc.log.Errorf("sync failed: local data loss, but no " +
  3802  				"recovery option.")
  3803  
  3804  			return nil, nil, nil, ErrCannotSyncCommitChains
  3805  		}
  3806  
  3807  		// In this case, we've likely lost data and shouldn't proceed
  3808  		// with channel updates.
  3809  		return nil, nil, nil, &ErrCommitSyncLocalDataLoss{
  3810  			ChannelPoint: lc.channelState.FundingOutpoint,
  3811  			CommitPoint:  msg.LocalUnrevokedCommitPoint,
  3812  		}
  3813  
  3814  	// If the height of our commitment chain reported by the remote party
  3815  	// is behind our view of the chain, then they probably lost some state,
  3816  	// and we'll force close the channel.
  3817  	case msg.RemoteCommitTailHeight+1 < localTailHeight:
  3818  		lc.log.Errorf("sync failed: remote believes our tail height is "+
  3819  			"%v, while we have %v!",
  3820  			msg.RemoteCommitTailHeight, localTailHeight)
  3821  		return nil, nil, nil, ErrCommitSyncRemoteDataLoss
  3822  
  3823  	// Their view of our commit chain is consistent with our view.
  3824  	case msg.RemoteCommitTailHeight == localTailHeight:
  3825  		// In sync, don't have to do anything.
  3826  		lc.log.Debugf("sync: RemoteTailHeight %d == localTailHeight %d",
  3827  			msg.RemoteCommitTailHeight, localTailHeight)
  3828  
  3829  	// We owe them a revocation if the tail of our current commitment chain
  3830  	// is one greater than what they _think_ our commitment tail is. In
  3831  	// this case we'll re-send the last revocation message that we sent.
  3832  	// This will be the revocation message for our prior chain tail.
  3833  	case msg.RemoteCommitTailHeight+1 == localTailHeight:
  3834  		lc.log.Debugf("sync: remote believes our tail height is %v, "+
  3835  			"while we have %v, we owe them a revocation",
  3836  			msg.RemoteCommitTailHeight, localTailHeight)
  3837  
  3838  		revocationMsg, err := lc.generateRevocation(
  3839  			localTailHeight - 1,
  3840  		)
  3841  		if err != nil {
  3842  			return nil, nil, nil, err
  3843  		}
  3844  		updates = append(updates, revocationMsg)
  3845  
  3846  		// Next, as a precaution, we'll check a special edge case. If
  3847  		// they initiated a state transition, we sent the revocation,
  3848  		// but died before the signature was sent. We re-transmit our
  3849  		// revocation, but also initiate a state transition to re-sync
  3850  		// them.
  3851  		if lc.OweCommitment(true) {
  3852  			commitSig, htlcSigs, _, err := lc.SignNextCommitment()
  3853  			switch {
  3854  
  3855  			// If we signed this state, then we'll accumulate
  3856  			// another update to send over.
  3857  			case err == nil:
  3858  				updates = append(updates, &lnwire.CommitSig{
  3859  					ChanID: lnwire.NewChanIDFromOutPoint(
  3860  						&lc.channelState.FundingOutpoint,
  3861  					),
  3862  					CommitSig: commitSig,
  3863  					HtlcSigs:  htlcSigs,
  3864  				})
  3865  
  3866  			// If we get a failure due to not knowing their next
  3867  			// point, then this is fine as they'll either send
  3868  			// FundingLocked, or revoke their next state to allow
  3869  			// us to continue forwards.
  3870  			case err == ErrNoWindow:
  3871  
  3872  			// Otherwise, this is an error and we'll treat it as
  3873  			// such.
  3874  			default:
  3875  				return nil, nil, nil, err
  3876  			}
  3877  		}
  3878  
  3879  	// There should be no other possible states.
  3880  	default:
  3881  		lc.log.Errorf("sync failed: remote believes our tail height is "+
  3882  			"%v, while we have %v!",
  3883  			msg.RemoteCommitTailHeight, localTailHeight)
  3884  		return nil, nil, nil, ErrCannotSyncCommitChains
  3885  	}
  3886  
  3887  	// Now check if our view of the remote chain is consistent with what
  3888  	// they tell us.
  3889  	switch {
  3890  
  3891  	// The remote's view of their next commit height is 2+ states
  3892  	// ahead of ours: we most likely lost data, or the remote is trying to
  3893  	// trick us. Since we have no way of verifying whether they are lying
  3894  	// or not, we will fail the channel, but should not force close it
  3895  	// automatically.
  3896  	case msg.NextLocalCommitHeight > remoteTipHeight+1:
  3897  		lc.log.Errorf("sync failed: remote's next commit height is %v, "+
  3898  			"while we believe it is %v!",
  3899  			msg.NextLocalCommitHeight, remoteTipHeight)
  3900  
  3901  		return nil, nil, nil, ErrCannotSyncCommitChains
  3902  
  3903  	// They are waiting for a state they have already ACKed.
  3904  	case msg.NextLocalCommitHeight <= remoteTailHeight:
  3905  		lc.log.Errorf("sync failed: remote's next commit height is %v, "+
  3906  			"while we believe it is %v!",
  3907  			msg.NextLocalCommitHeight, remoteTipHeight)
  3908  
  3909  		// They previously ACKed our current tail, and now they are
  3910  		// waiting for it. They probably lost state.
  3911  		return nil, nil, nil, ErrCommitSyncRemoteDataLoss
  3912  
  3913  	// They have received our latest commitment, life is good.
  3914  	case msg.NextLocalCommitHeight == remoteTipHeight+1:
  3915  		lc.log.Debugf("sync: NextLocalCommitHeight %d == remoteTipHeight+1 %d",
  3916  			msg.NextLocalCommitHeight, remoteTipHeight+1)
  3917  
  3918  	// We owe them a commitment if the tip of their chain (from our PoV) is
  3919  	// equal to what they think their next commit height should be. We'll
  3920  	// re-send all the updates necessary to recreate this state, along
  3921  	// with the commit sig.
  3922  	case msg.NextLocalCommitHeight == remoteTipHeight:
  3923  		lc.log.Debugf("sync: remote's next commit height is %v, while "+
  3924  			"we believe it is %v, we owe them a commitment",
  3925  			msg.NextLocalCommitHeight, remoteTipHeight)
  3926  
  3927  		// Grab the current remote chain tip from the database.  This
  3928  		// commit diff contains all the information required to re-sync
  3929  		// our states.
  3930  		commitDiff, err := lc.channelState.RemoteCommitChainTip()
  3931  		if err != nil {
  3932  			return nil, nil, nil, err
  3933  		}
  3934  
  3935  		var commitUpdates []lnwire.Message
  3936  
  3937  		// Next, we'll need to send over any updates we sent as part of
  3938  		// this new proposed commitment state.
  3939  		for _, logUpdate := range commitDiff.LogUpdates {
  3940  			commitUpdates = append(commitUpdates, logUpdate.UpdateMsg)
  3941  		}
  3942  
  3943  		// With the batch of updates accumulated, we'll now re-send the
  3944  		// original CommitSig message required to re-sync their remote
  3945  		// commitment chain with our local version of their chain.
  3946  		commitUpdates = append(commitUpdates, commitDiff.CommitSig)
  3947  
  3948  		// NOTE: If a revocation is not owed, then updates is empty.
  3949  		if lc.channelState.LastWasRevoke {
  3950  			// If LastWasRevoke is set to true, a revocation was sent last,
  3951  			// and we need to reorder the updates so that the revocation
  3952  			// stored in updates comes after the LogUpdates+CommitSig.
  3953  			//
  3954  			// ---logupdates--->
  3955  			// ---commitsig---->
  3956  			// ---revocation--->
  3957  			updates = append(commitUpdates, updates...)
  3958  		} else {
  3959  			// Otherwise, the revocation should come before LogUpdates
  3960  			// + CommitSig.
  3961  			//
  3962  			// ---revocation--->
  3963  			// ---logupdates--->
  3964  			// ---commitsig---->
  3965  			updates = append(updates, commitUpdates...)
  3966  		}
  3967  
  3968  		openedCircuits = commitDiff.OpenedCircuitKeys
  3969  		closedCircuits = commitDiff.ClosedCircuitKeys
  3970  
  3971  	// There should be no other possible states as long as the commit chain
  3972  	// can have at most two elements. If we still end up here, something is
  3973  	// wrong.
  3974  	default:
  3975  		lc.log.Errorf("sync failed: remote's next commit height is %v, "+
  3976  			"while we believe it is %v!",
  3977  			msg.NextLocalCommitHeight, remoteTipHeight)
  3978  		return nil, nil, nil, ErrCannotSyncCommitChains
  3979  	}
  3980  
  3981  	// If we didn't have recovery options, then the final check cannot be
  3982  	// performed, and we'll return early.
  3983  	if !hasRecoveryOptions {
  3984  		return updates, openedCircuits, closedCircuits, nil
  3985  	}
  3986  
  3987  	// At this point we have determined that either the commit heights are
  3988  	// in sync, or that we are in a state we can recover from. As a final
  3989  	// check, we ensure that the commitment point sent to us by the remote
  3990  	// is valid.
  3991  	var commitPoint *secp256k1.PublicKey
  3992  	switch {
  3993  	// If their height is one beyond what we know their current height to
  3994  	// be, then we need to compare their current unrevoked commitment point
  3995  	// as that's what they should send.
  3996  	case msg.NextLocalCommitHeight == remoteTailHeight+1:
  3997  		commitPoint = lc.channelState.RemoteCurrentRevocation
  3998  
  3999  	// Alternatively, if their height is two beyond what we know their best
  4000  	// height to be, then they're holding onto two commitments, and the
  4001  	// highest unrevoked point is their next revocation.
  4002  	//
  4003  	// TODO(roasbeef): verify this in the spec...
  4004  	case msg.NextLocalCommitHeight == remoteTailHeight+2:
  4005  		commitPoint = lc.channelState.RemoteNextRevocation
  4006  	}
  4007  
  4008  	// Only for legacy (non-tweakless) channels do we verify the commitment
  4009  	// point, as for tweakless channels it has no validity requirements.
  4010  	tweakless := lc.channelState.ChanType.IsTweakless()
  4011  	if !tweakless && commitPoint != nil &&
  4012  		!commitPoint.IsEqual(msg.LocalUnrevokedCommitPoint) {
  4013  
  4014  		lc.log.Errorf("sync failed: remote sent invalid commit point "+
  4015  			"for height %v!",
  4016  			msg.NextLocalCommitHeight)
  4017  		return nil, nil, nil, ErrInvalidLocalUnrevokedCommitPoint
  4018  	}
  4019  
  4020  	return updates, openedCircuits, closedCircuits, nil
  4021  }
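
        // A minimal sketch of how a caller might replay the re-sync messages
        // assembled above, assuming a hypothetical peer value exposing a
        // SendMessage method:
        //
        //	updates, _, _, err := lc.ProcessChanSyncMsg(msg)
        //	if err != nil {
        //		// e.g. ErrCannotSyncCommitChains or
        //		// ErrCommitSyncRemoteDataLoss; the caller decides whether
        //		// to fail or force close the channel.
        //		return err
        //	}
        //	for _, upd := range updates {
        //		if err := peer.SendMessage(upd); err != nil {
        //			return err
        //		}
        //	}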
  4022  
  4023  // computeView takes the given htlcView, and calculates the balances, filtered
  4024  // view (settling unsettled HTLCs), commitment size and feePerKB, after
  4025  // applying the HTLCs to the latest commitment. The returned balances are the
  4026  // balances *before* subtracting the commitment fee from the initiator's
  4027  // balance.
  4028  //
  4029  // If the updateState boolean is set true, the add and remove heights of the
  4030  // HTLCs will be set to the next commitment height.
  4031  func (lc *LightningChannel) computeView(view *htlcView, remoteChain bool,
  4032  	updateState bool) (lnwire.MilliAtom, lnwire.MilliAtom, int64,
  4033  	*htlcView, error) {
  4034  
  4035  	commitChain := lc.localCommitChain
  4036  	dustLimit := lc.channelState.LocalChanCfg.DustLimit
  4037  	if remoteChain {
  4038  		commitChain = lc.remoteCommitChain
  4039  		dustLimit = lc.channelState.RemoteChanCfg.DustLimit
  4040  	}
  4041  
  4042  	// Since the fetched htlc view will include all updates added after the
  4043  	// last committed state, we start with the balances reflecting that
  4044  	// state.
  4045  	ourBalance := commitChain.tip().ourBalance
  4046  	theirBalance := commitChain.tip().theirBalance
  4047  
  4048  	// Add the fee from the previous commitment state back to the
  4049  	// initiator's balance, so that the fee can be recalculated and
  4050  	// re-applied in case fee estimation parameters have changed or the
  4051  	// number of outstanding HTLCs has changed.
  4052  	if lc.channelState.IsInitiator {
  4053  		ourBalance += lnwire.NewMAtomsFromAtoms(
  4054  			commitChain.tip().fee)
  4055  	} else {
  4056  		theirBalance += lnwire.NewMAtomsFromAtoms(
  4057  			commitChain.tip().fee)
  4058  	}
  4059  	nextHeight := commitChain.tip().height + 1
  4060  
  4061  	// Initialize feePerKB to the last committed fee for this chain, as
  4062  	// we'll need this to determine which HTLCs are dust, and also the
  4063  	// final fee rate.
  4064  	view.feePerKB = commitChain.tip().feePerKB
  4065  
  4066  	// We evaluate the view at this stage, meaning settled and failed HTLCs
  4067  	// will remove their corresponding added HTLCs.  The resulting filtered
  4068  	// view will only have Add entries left, making it easy to compare the
  4069  	// channel constraints to the final commitment state. If any fee
  4070  	// updates are found in the logs, the commitment fee rate should be
  4071  	// changed, so we'll also set the feePerKB to this new value.
  4072  	filteredHTLCView, err := lc.evaluateHTLCView(view, &ourBalance,
  4073  		&theirBalance, nextHeight, remoteChain, updateState)
  4074  	if err != nil {
  4075  		return 0, 0, 0, nil, err
  4076  	}
  4077  	feePerKB := filteredHTLCView.feePerKB
  4078  
  4079  	// Now go through all HTLCs at this stage to calculate the total size,
  4080  	// which is needed to calculate the transaction fee.
  4081  	var totalHtlcSize int64
  4082  	for _, htlc := range filteredHTLCView.ourUpdates {
  4083  		if HtlcIsDust(
  4084  			lc.channelState.ChanType, false, !remoteChain,
  4085  			feePerKB, htlc.Amount.ToAtoms(), dustLimit,
  4086  		) {
  4087  			continue
  4088  		}
  4089  
  4090  		totalHtlcSize += input.HTLCOutputSize
  4091  	}
  4092  	for _, htlc := range filteredHTLCView.theirUpdates {
  4093  		if HtlcIsDust(
  4094  			lc.channelState.ChanType, true, !remoteChain,
  4095  			feePerKB, htlc.Amount.ToAtoms(), dustLimit,
  4096  		) {
  4097  			continue
  4098  		}
  4099  
  4100  		totalHtlcSize += input.HTLCOutputSize
  4101  	}
  4102  
  4103  	totalCommitSize := CommitSize(lc.channelState.ChanType) +
  4104  		totalHtlcSize
  4105  	return ourBalance, theirBalance, totalCommitSize, filteredHTLCView, nil
  4106  }
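
        // A minimal sketch of how computeView's results feed the commitment
        // fee calculation; the arithmetic is spelled out inline purely for
        // illustration:
        //
        //	ourBalance, theirBalance, commitSize, filtered, err :=
        //		lc.computeView(view, remoteChain, false)
        //	if err != nil {
        //		return err
        //	}
        //	// Fee in atoms for commitSize bytes at filtered.feePerKB
        //	// (atoms per kilobyte).
        //	commitFee := dcrutil.Amount(int64(filtered.feePerKB) * commitSize / 1000)
        //	if lc.channelState.IsInitiator {
        //		ourBalance -= lnwire.NewMAtomsFromAtoms(commitFee)
        //	} else {
        //		theirBalance -= lnwire.NewMAtomsFromAtoms(commitFee)
        //	}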
  4107  
  4108  // genHtlcSigValidationJobs generates a series of signature verification jobs
  4109  // meant to verify all the signatures for HTLC's attached to a newly created
  4110  // commitment state. The jobs generated are fully populated, and can be sent
  4111  // directly into the pool of workers.
  4112  func genHtlcSigValidationJobs(localCommitmentView *commitment,
  4113  	keyRing *CommitmentKeyRing, htlcSigs []lnwire.Sig,
  4114  	chanType channeldb.ChannelType, isLocalInitiator bool, leaseExpiry uint32,
  4115  	localChanCfg, remoteChanCfg *channeldb.ChannelConfig) ([]VerifyJob, error) {
  4116  
  4117  	txHash := localCommitmentView.txn.TxHash()
  4118  	feePerKB := localCommitmentView.feePerKB
  4119  	sigHashType := HtlcSigHashType(chanType)
  4120  
  4121  	// With the required state generated, we'll create a slice with large
  4122  	// enough capacity to hold verification jobs for all HTLC's in this
  4123  	// view. In the case that we have some dust outputs, then the actual
  4124  	// length will be smaller than the total capacity.
  4125  	numHtlcs := (len(localCommitmentView.incomingHTLCs) +
  4126  		len(localCommitmentView.outgoingHTLCs))
  4127  	verifyJobs := make([]VerifyJob, 0, numHtlcs)
  4128  
  4129  	// We'll iterate through each output in the commitment transaction,
  4130  	// populating the sigHash closure function if it's detected to be an
  4131  	// HTLC output. Given the sighash, and the signing key, we'll be able
  4132  	// to validate each signature within the worker pool.
  4133  	i := 0
  4134  	for index := range localCommitmentView.txn.TxOut {
  4135  		var (
  4136  			htlcIndex uint64
  4137  			sigHash   func() ([]byte, error)
  4138  			sig       *ecdsa.Signature
  4139  			err       error
  4140  		)
  4141  
  4142  		outputIndex := int32(index)
  4143  		switch {
  4144  
  4145  		// If this output index is found within the incoming HTLC
  4146  		// index, then this means that we need to generate an HTLC
  4147  		// success transaction in order to validate the signature.
  4148  		case localCommitmentView.incomingHTLCIndex[outputIndex] != nil:
  4149  			htlc := localCommitmentView.incomingHTLCIndex[outputIndex]
  4150  
  4151  			htlcIndex = htlc.HtlcIndex
  4152  
  4153  			sigHash = func() ([]byte, error) {
  4154  				op := wire.OutPoint{
  4155  					Hash:  txHash,
  4156  					Index: uint32(htlc.localOutputIndex),
  4157  				}
  4158  
  4159  				htlcFee := HtlcSuccessFee(chanType, feePerKB)
  4160  				outputAmt := htlc.Amount.ToAtoms() - htlcFee
  4161  
  4162  				successTx, err := CreateHtlcSuccessTx(
  4163  					chanType, isLocalInitiator, op,
  4164  					outputAmt, uint32(localChanCfg.CsvDelay),
  4165  					leaseExpiry, keyRing.RevocationKey,
  4166  					keyRing.ToLocalKey,
  4167  				)
  4168  				if err != nil {
  4169  					return nil, err
  4170  				}
  4171  
  4172  				sigHash, err := txscript.CalcSignatureHash(
  4173  					htlc.ourWitnessScript,
  4174  					sigHashType, successTx, 0,
  4175  					nil,
  4176  				)
  4177  				if err != nil {
  4178  					return nil, err
  4179  				}
  4180  
  4181  				return sigHash, nil
  4182  			}
  4183  
  4184  			// Make sure there are more signatures left.
  4185  			if i >= len(htlcSigs) {
  4186  				return nil, fmt.Errorf("not enough HTLC " +
  4187  					"signatures")
  4188  			}
  4189  
  4190  			// With the sighash generated, we'll also store the
  4191  			// signature so it can be written to disk if this state
  4192  			// is valid.
  4193  			sig, err = htlcSigs[i].ToSignature()
  4194  			if err != nil {
  4195  				return nil, err
  4196  			}
  4197  			htlc.sig = sig
  4198  
  4199  		// Otherwise, if this is an outgoing HTLC, then we'll need to
  4200  		// generate a timeout transaction so we can verify the
  4201  		// signature presented.
  4202  		case localCommitmentView.outgoingHTLCIndex[outputIndex] != nil:
  4203  			htlc := localCommitmentView.outgoingHTLCIndex[outputIndex]
  4204  
  4205  			htlcIndex = htlc.HtlcIndex
  4206  
  4207  			sigHash = func() ([]byte, error) {
  4208  				op := wire.OutPoint{
  4209  					Hash:  txHash,
  4210  					Index: uint32(htlc.localOutputIndex),
  4211  				}
  4212  
  4213  				htlcFee := HtlcTimeoutFee(chanType, feePerKB)
  4214  				outputAmt := htlc.Amount.ToAtoms() - htlcFee
  4215  
  4216  				timeoutTx, err := CreateHtlcTimeoutTx(
  4217  					chanType, isLocalInitiator, op,
  4218  					outputAmt, htlc.Timeout,
  4219  					uint32(localChanCfg.CsvDelay), leaseExpiry,
  4220  					keyRing.RevocationKey, keyRing.ToLocalKey,
  4221  				)
  4222  				if err != nil {
  4223  					return nil, err
  4224  				}
  4225  
  4226  				sigHash, err := txscript.CalcSignatureHash(
  4227  					htlc.ourWitnessScript,
  4228  					sigHashType, timeoutTx, 0,
  4229  					nil,
  4230  				)
  4231  				if err != nil {
  4232  					return nil, err
  4233  				}
  4234  
  4235  				return sigHash, nil
  4236  			}
  4237  
  4238  			// Make sure there are more signatures left.
  4239  			if i >= len(htlcSigs) {
  4240  				return nil, fmt.Errorf("not enough HTLC " +
  4241  					"signatures")
  4242  			}
  4243  
  4244  			// With the sighash generated, we'll also store the
  4245  			// signature so it can be written to disk if this state
  4246  			// is valid.
  4247  			sig, err = htlcSigs[i].ToSignature()
  4248  			if err != nil {
  4249  				return nil, err
  4250  			}
  4251  			htlc.sig = sig
  4252  
  4253  		default:
  4254  			continue
  4255  		}
  4256  
  4257  		verifyJobs = append(verifyJobs, VerifyJob{
  4258  			HtlcIndex: htlcIndex,
  4259  			PubKey:    keyRing.RemoteHtlcKey,
  4260  			Sig:       sig,
  4261  			SigHash:   sigHash,
  4262  		})
  4263  
  4264  		i++
  4265  	}
  4266  
  4267  	// If we received a number of HTLC signatures that doesn't match our
  4268  	// commitment, we'll return an error now.
  4269  	if len(htlcSigs) != i {
  4270  		return nil, fmt.Errorf("number of htlc sig mismatch. "+
  4271  			"Expected %v sigs, got %v", i, len(htlcSigs))
  4272  	}
  4273  
  4274  	return verifyJobs, nil
  4275  }
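
        // As a worked example of the accounting above: with three incoming
        // and two outgoing HTLCs on the commitment, one of which is dust and
        // therefore never materializes as an output, the remote party must
        // supply exactly four signatures. Both a shorter and a longer
        // htlcSigs slice cause genHtlcSigValidationJobs to error out rather
        // than produce a partial job set.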
  4276  
  4277  // InvalidCommitSigError is a struct that implements the error interface to
  4278  // report a failure to validate a commitment signature for a remote peer.
  4279  // We'll use the items in this struct to generate a rich error message for the
  4280  // remote peer when we receive an invalid signature from it. Doing so can
  4281  // greatly aid in debugging cross-implementation issues.
  4282  type InvalidCommitSigError struct {
  4283  	commitHeight uint64
  4284  
  4285  	commitSig []byte
  4286  
  4287  	sigHash []byte
  4288  
  4289  	commitTx []byte
  4290  }
  4291  
  4292  // Error returns a detailed error string including the exact transaction that
  4293  // caused an invalid commitment signature.
  4294  func (i *InvalidCommitSigError) Error() string {
  4295  	return fmt.Sprintf("rejected commitment: commit_height=%v, "+
  4296  		"invalid_commit_sig=%x, commit_tx=%x, sig_hash=%x", i.commitHeight,
  4297  		i.commitSig, i.commitTx, i.sigHash)
  4298  }
  4299  
  4300  // A compile time flag to ensure that InvalidCommitSigError implements the
  4301  // error interface.
  4302  var _ error = (*InvalidCommitSigError)(nil)
  4303  
  4304  // InvalidHtlcSigError is a struct that implements the error interface to
  4305  // report a failure to validate an htlc signature from a remote peer. We'll use
  4306  // the items in this struct to generate a rich error message for the remote
  4307  // peer when we receive an invalid signature from it. Doing so can greatly
  4308  // aid in debugging cross-implementation issues.
  4309  type InvalidHtlcSigError struct {
  4310  	commitHeight uint64
  4311  
  4312  	htlcSig []byte
  4313  
  4314  	htlcIndex uint64
  4315  
  4316  	sigHash []byte
  4317  
  4318  	commitTx []byte
  4319  }
  4320  
  4321  // Error returns a detailed error string including the exact transaction that
  4322  // caused an invalid htlc signature.
  4323  func (i *InvalidHtlcSigError) Error() string {
  4324  	return fmt.Sprintf("rejected commitment: commit_height=%v, "+
  4325  		"invalid_htlc_sig=%x, commit_tx=%x, sig_hash=%x", i.commitHeight,
  4326  		i.htlcSig, i.commitTx, i.sigHash)
  4327  }
  4328  
  4329  // A compile time flag to ensure that InvalidHtlcSigError implements the
  4330  // error interface.
  4331  var _ error = (*InvalidHtlcSigError)(nil)
  4332  
  4333  // ReceiveNewCommitment processes a signature for a new commitment state sent by
  4334  // the remote party. This method should be called in response to the
  4335  // remote party initiating a new change, or when the remote party sends a
  4336  // signature fully accepting a new state we've initiated. If we are able to
  4337  // successfully validate the signature, then the generated commitment is added
  4338  // to our local commitment chain. Once we send a revocation for our prior
  4339  // state, then this newly added commitment becomes our current accepted channel
  4340  // state.
  4341  func (lc *LightningChannel) ReceiveNewCommitment(commitSig lnwire.Sig,
  4342  	htlcSigs []lnwire.Sig) error {
  4343  
  4344  	lc.Lock()
  4345  	defer lc.Unlock()
  4346  
  4347  	// Check for empty commit sig. Because of a previously existing bug, it
  4348  	// is possible that we receive an empty commit sig from nodes running an
  4349  	// older version. This is a relaxation of the spec, but it is still
  4350  	// possible to handle it. To not break any channels with those older
  4351  	// nodes, we just log the event. This check is also not totally
  4352  	// reliable, because it could be that we've sent out a new sig, but the
  4353  	// remote hasn't received it yet. We could then falsely assume that they
  4354  	// should add our updates to their remote commitment tx.
  4355  	if !lc.oweCommitment(false) {
  4356  		lc.log.Warnf("empty commit sig message received")
  4357  	}
  4358  
  4359  	// Determine the last update on the local log that has been locked in.
  4360  	localACKedIndex := lc.remoteCommitChain.tail().ourMessageIndex
  4361  	localHtlcIndex := lc.remoteCommitChain.tail().ourHtlcIndex
  4362  
  4363  	// Ensure that this new local update from the remote node respects all
  4364  	// the constraints we specified during initial channel setup. If not,
  4365  	// then we'll abort the channel as they've violated our constraints.
  4366  	err := lc.validateCommitmentSanity(
  4367  		lc.remoteUpdateLog.logIndex, localACKedIndex, false, nil, nil,
  4368  	)
  4369  	if err != nil {
  4370  		return err
  4371  	}
  4372  
  4373  	// We're receiving a new commitment which attempts to extend our local
  4374  	// commitment chain height by one, so fetch the proper commitment point
  4375  	// as this will be needed to derive the keys required to construct the
  4376  	// commitment.
  4377  	nextHeight := lc.currentHeight + 1
  4378  	commitSecret, err := lc.channelState.RevocationProducer.AtIndex(nextHeight)
  4379  	if err != nil {
  4380  		return err
  4381  	}
  4382  	commitPoint := input.ComputeCommitmentPoint(commitSecret[:])
  4383  	keyRing := DeriveCommitmentKeys(
  4384  		commitPoint, true, lc.channelState.ChanType,
  4385  		&lc.channelState.LocalChanCfg, &lc.channelState.RemoteChanCfg,
  4386  	)
  4387  
  4388  	// With the current commitment point re-calculated, construct the new
  4389  	// commitment view which includes all the entries (pending or committed)
  4390  	// we know of in the remote node's HTLC log, but only our local changes
  4391  	// up to the last change the remote node has ACK'd.
  4392  	localCommitmentView, err := lc.fetchCommitmentView(
  4393  		false, localACKedIndex, localHtlcIndex,
  4394  		lc.remoteUpdateLog.logIndex, lc.remoteUpdateLog.htlcCounter,
  4395  		keyRing,
  4396  	)
  4397  	if err != nil {
  4398  		return err
  4399  	}
  4400  
  4401  	lc.log.Tracef("extending local chain to height %v, "+
  4402  		"local_log=%v, remote_log=%v",
  4403  		localCommitmentView.height,
  4404  		localACKedIndex, lc.remoteUpdateLog.logIndex)
  4405  
  4406  	lc.log.Tracef("local chain: our_balance=%v, "+
  4407  		"their_balance=%v, commit_tx: %v",
  4408  		localCommitmentView.ourBalance, localCommitmentView.theirBalance,
  4409  		newLogClosure(func() string {
  4410  			return spew.Sdump(cacheCommitmentTxHash(localCommitmentView).txn)
  4411  		}),
  4412  	)
  4413  
  4414  	// Construct the sighash of the commitment transaction corresponding to
  4415  	// this newly proposed state update.
  4416  	localCommitTx := localCommitmentView.txn
  4417  	multiSigScript := lc.signDesc.WitnessScript
  4418  	sigHash, err := txscript.CalcSignatureHash(
  4419  		multiSigScript, txscript.SigHashAll, localCommitTx, 0, nil,
  4420  	)
  4421  	if err != nil {
  4422  		// TODO(roasbeef): fetchview has already mutated the HTLCs...
  4423  		//  * need to either roll-back, or make pure
  4424  		return err
  4425  	}
  4426  
  4427  	// As an optimization, we'll generate a series of jobs for the worker
  4428  	// pool to verify each of the HTLC signatures presented. Once
  4429  	// generated, we'll submit these jobs to the worker pool.
  4430  	var leaseExpiry uint32
  4431  	if lc.channelState.ChanType.HasLeaseExpiration() {
  4432  		leaseExpiry = lc.channelState.ThawHeight
  4433  	}
  4434  	verifyJobs, err := genHtlcSigValidationJobs(
  4435  		localCommitmentView, keyRing, htlcSigs,
  4436  		lc.channelState.ChanType, lc.channelState.IsInitiator,
  4437  		leaseExpiry, &lc.channelState.LocalChanCfg,
  4438  		&lc.channelState.RemoteChanCfg,
  4439  	)
  4440  	if err != nil {
  4441  		return err
  4442  	}
  4443  
  4444  	cancelChan := make(chan struct{})
  4445  	verifyResps := lc.sigPool.SubmitVerifyBatch(verifyJobs, cancelChan)
  4446  
  4447  	// While the HTLC verification jobs are proceeding asynchronously,
  4448  	// we'll ensure that the newly constructed commitment state has a valid
  4449  	// signature.
  4450  	verifyKey := *lc.channelState.RemoteChanCfg.MultiSigKey.PubKey
  4451  	cSig, err := commitSig.ToSignature()
  4452  	if err != nil {
  4453  		return err
  4454  	}
  4455  	if !cSig.Verify(sigHash, &verifyKey) {
  4456  		close(cancelChan)
  4457  
  4458  		// If we fail to validate their commitment signature, we'll
  4459  		// generate a special error to send over the protocol. We'll
  4460  		// include the exact signature and commitment we failed to
  4461  		// verify against in order to aid debugging.
  4462  		var txBytes bytes.Buffer
  4463  		txBytes.Grow(localCommitTx.SerializeSize())
  4464  		localCommitTx.Serialize(&txBytes)
  4465  		return &InvalidCommitSigError{
  4466  			commitHeight: nextHeight,
  4467  			commitSig:    commitSig.ToSignatureBytes(),
  4468  			sigHash:      sigHash,
  4469  			commitTx:     txBytes.Bytes(),
  4470  		}
  4471  	}
  4472  
  4473  	// With the primary commitment transaction validated, we'll check each
  4474  	// of the HTLC validation jobs.
  4475  	for i := 0; i < len(verifyJobs); i++ {
  4476  		// In the case that a single signature is invalid, we'll exit
  4477  		// early and cancel all the outstanding verification jobs.
  4478  		htlcErr := <-verifyResps
  4479  		if htlcErr != nil {
  4480  			close(cancelChan)
  4481  
  4482  			sig, err := lnwire.NewSigFromSignature(
  4483  				htlcErr.Sig,
  4484  			)
  4485  			if err != nil {
  4486  				return err
  4487  			}
  4488  			sigHash, err := htlcErr.SigHash()
  4489  			if err != nil {
  4490  				return err
  4491  			}
  4492  
  4493  			var txBytes bytes.Buffer
  4494  			txBytes.Grow(localCommitTx.SerializeSize())
  4495  			localCommitTx.Serialize(&txBytes)
  4496  			return &InvalidHtlcSigError{
  4497  				commitHeight: nextHeight,
  4498  				htlcSig:      sig.ToSignatureBytes(),
  4499  				htlcIndex:    htlcErr.HtlcIndex,
  4500  				sigHash:      sigHash,
  4501  				commitTx:     txBytes.Bytes(),
  4502  			}
  4503  		}
  4504  	}
  4505  
  4506  	// The signature checks out, so we can now add the new commitment to
  4507  	// our local commitment chain.
  4508  	localCommitmentView.sig = commitSig.ToSignatureBytes()
  4509  	lc.localCommitChain.addCommitment(localCommitmentView)
  4510  
  4511  	return nil
  4512  }
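
        // A minimal sketch of the receiver's half of the commitment dance,
        // assuming a hypothetical peer value exposing a SendMessage method:
        //
        //	if err := lc.ReceiveNewCommitment(commitSig, htlcSigs); err != nil {
        //		// e.g. *InvalidCommitSigError or *InvalidHtlcSigError.
        //		return err
        //	}
        //	rev, _, err := lc.RevokeCurrentCommitment()
        //	if err != nil {
        //		return err
        //	}
        //	return peer.SendMessage(rev)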
  4513  
  4514  // IsChannelClean returns true if neither side has pending commitments, neither
  4515  // side has HTLC's, and all updates are locked in irrevocably. Internally, it
  4516  // utilizes the oweCommitment function by calling it for local and remote
  4517  // evaluation. We check if we have a pending commitment for our local state
  4518  // since this function may be called by sub-systems that are not the link (e.g.
  4519  // the rpcserver), and the ReceiveNewCommitment & RevokeCurrentCommitment calls
  4520  // are not atomic, even though link processing ensures no updates can happen in
  4521  // between.
  4522  func (lc *LightningChannel) IsChannelClean() bool {
  4523  	lc.RLock()
  4524  	defer lc.RUnlock()
  4525  
  4526  	// Check whether we have a pending commitment for our local state.
  4527  	if lc.localCommitChain.hasUnackedCommitment() {
  4528  		return false
  4529  	}
  4530  
  4531  	// Check whether our counterparty has a pending commitment for their
  4532  	// state.
  4533  	if lc.remoteCommitChain.hasUnackedCommitment() {
  4534  		return false
  4535  	}
  4536  
  4537  	// We call ActiveHtlcs to ensure there are no HTLCs on either
  4538  	// commitment.
  4539  	if len(lc.channelState.ActiveHtlcs()) != 0 {
  4540  		return false
  4541  	}
  4542  
  4543  	// Now check that both local and remote commitments are signing the
  4544  	// same updates.
  4545  	if lc.oweCommitment(true) {
  4546  		return false
  4547  	}
  4548  
  4549  	if lc.oweCommitment(false) {
  4550  		return false
  4551  	}
  4552  
  4553  	// If we reached this point, the channel has no HTLCs and both
  4554  	// commitments sign the same updates.
  4555  	return true
  4556  }
  4557  
  4558  // OweCommitment returns a boolean value reflecting whether we need to send
  4559  // out a commitment signature because there are outstanding local updates and/or
  4560  // updates in the local commit tx that aren't reflected in the remote commit tx
  4561  // yet.
  4562  func (lc *LightningChannel) OweCommitment(local bool) bool {
  4563  	lc.RLock()
  4564  	defer lc.RUnlock()
  4565  
  4566  	return lc.oweCommitment(local)
  4567  }
  4568  
  4569  // oweCommitment is the internal version of OweCommitment. This function expects
  4570  // to be executed with a lock held.
  4571  func (lc *LightningChannel) oweCommitment(local bool) bool {
  4572  	var (
  4573  		remoteUpdatesPending, localUpdatesPending bool
  4574  
  4575  		lastLocalCommit  = lc.localCommitChain.tip()
  4576  		lastRemoteCommit = lc.remoteCommitChain.tip()
  4577  
  4578  		perspective string
  4579  	)
  4580  
  4581  	if local {
  4582  		perspective = "local"
  4583  
  4584  		// There are local updates pending if our local update log is
  4585  		// not in sync with our remote commitment tx.
  4586  		localUpdatesPending = lc.localUpdateLog.logIndex !=
  4587  			lastRemoteCommit.ourMessageIndex
  4588  
  4589  		// There are remote updates pending if their remote commitment
  4590  		// tx (our local commitment tx) contains updates that we don't
  4591  		// have added to our remote commitment tx yet.
  4592  		remoteUpdatesPending = lastLocalCommit.theirMessageIndex !=
  4593  			lastRemoteCommit.theirMessageIndex
  4594  
  4595  	} else {
  4596  		perspective = "remote"
  4597  
  4598  		// There are local updates pending (local updates from the
  4599  		// perspective of the remote party) if the remote party has
  4600  		// updates to their remote tx pending that they haven't
  4601  		// signed yet.
  4602  		localUpdatesPending = lc.remoteUpdateLog.logIndex !=
  4603  			lastLocalCommit.theirMessageIndex
  4604  
  4605  		// There are remote updates pending (remote updates from the
  4606  		// perspective of the remote party) if we have updates on our
  4607  		// remote commitment tx that they haven't added to theirs yet.
  4608  		remoteUpdatesPending = lastRemoteCommit.ourMessageIndex !=
  4609  			lastLocalCommit.ourMessageIndex
  4610  	}
  4611  
  4612  	// If any of the conditions above is true, we owe a commitment
  4613  	// signature.
  4614  	oweCommitment := localUpdatesPending || remoteUpdatesPending
  4615  
  4616  	lc.log.Tracef("%v owes commit: %v (local updates: %v, "+
  4617  		"remote updates %v)", perspective, oweCommitment,
  4618  		localUpdatesPending, remoteUpdatesPending)
  4619  
  4620  	return oweCommitment
  4621  }
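
        // As a worked example: if our local update log is at logIndex 7 while
        // the remote commitment tip only covers our updates up to
        // ourMessageIndex 5, then oweCommitment(true) returns true, since two
        // of our updates still lack a signature from us.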
  4622  
  4623  // PendingLocalUpdateCount returns the number of local updates that still need
  4624  // to be applied to the remote commitment tx.
  4625  func (lc *LightningChannel) PendingLocalUpdateCount() uint64 {
  4626  	lc.RLock()
  4627  	defer lc.RUnlock()
  4628  
  4629  	lastRemoteCommit := lc.remoteCommitChain.tip()
  4630  
  4631  	return lc.localUpdateLog.logIndex - lastRemoteCommit.ourMessageIndex
  4632  }
  4633  
  4634  // RevokeCurrentCommitment revokes the next lowest unrevoked commitment
  4635  // transaction in the local commitment chain. As a result the edge of our
  4636  // revocation window is extended by one, and the tail of our local commitment
  4637  // chain is advanced by a single commitment. This now lowest unrevoked
  4638  // commitment becomes our currently accepted state within the channel. This
  4639  // method also returns the set of HTLC's currently active within the commitment
  4640  // transaction. This return value allows callers to act once an HTLC has been
  4641  // locked into our commitment transaction.
  4642  func (lc *LightningChannel) RevokeCurrentCommitment() (*lnwire.RevokeAndAck, []channeldb.HTLC, error) {
  4643  	lc.Lock()
  4644  	defer lc.Unlock()
  4645  
  4646  	revocationMsg, err := lc.generateRevocation(lc.currentHeight)
  4647  	if err != nil {
  4648  		return nil, nil, err
  4649  	}
  4650  
  4651  	lc.log.Tracef("revoking height=%v, now at height=%v",
  4652  		lc.localCommitChain.tail().height,
  4653  		lc.currentHeight+1)
  4654  
  4655  	// Advance our tail, as we've revoked our previous state.
  4656  	lc.localCommitChain.advanceTail()
  4657  	lc.currentHeight++
  4658  
  4659  	// Additionally, generate a channel delta for this state transition for
  4660  	// persistent storage.
  4661  	chainTail := lc.localCommitChain.tail()
  4662  	newCommitment := chainTail.toDiskCommit(true)
  4663  
  4664  	// Get the unsigned acked remote updates that are currently in memory.
  4665  	// We need them after a restart to sync our remote commitment with what
  4666  	// is committed locally.
  4667  	unsignedAckedUpdates := lc.getUnsignedAckedUpdates()
  4668  
  4669  	err = lc.channelState.UpdateCommitment(
  4670  		newCommitment, unsignedAckedUpdates,
  4671  	)
  4672  	if err != nil {
  4673  		return nil, nil, err
  4674  	}
  4675  
  4676  	lc.log.Tracef("state transition accepted: "+
  4677  		"our_balance=%v, their_balance=%v, unsigned_acked_updates=%v",
  4678  		chainTail.ourBalance,
  4679  		chainTail.theirBalance,
  4680  		len(unsignedAckedUpdates))
  4681  
  4682  	revocationMsg.ChanID = lnwire.NewChanIDFromOutPoint(
  4683  		&lc.channelState.FundingOutpoint,
  4684  	)
  4685  
  4686  	return revocationMsg, newCommitment.Htlcs, nil
  4687  }
  4688  
  4689  // ReceiveRevocation processes a revocation sent by the remote party for the
  4690  // lowest unrevoked commitment within their commitment chain. We receive a
  4691  // revocation either during the initial session negotiation wherein revocation
  4692  // windows are extended, or in response to a state update that we initiate. If
  4693  // successful, then the remote commitment chain is advanced by a single
  4694  // commitment, and a log compaction is attempted.
  4695  //
  4696  // The returned values correspond to:
  4697  //  1. The forwarding package corresponding to the remote commitment height
  4698  //     that was revoked.
  4699  //  2. The PaymentDescriptor of any Add HTLCs that were locked in by this
  4700  //     revocation.
  4701  //  3. The PaymentDescriptor of any Settle/Fail HTLCs that were locked in by
  4702  //     this revocation.
  4703  //  4. The set of HTLCs present on the current valid commitment transaction
  4704  //     for the remote party.
  4705  func (lc *LightningChannel) ReceiveRevocation(revMsg *lnwire.RevokeAndAck) (
  4706  	*channeldb.FwdPkg, []*PaymentDescriptor, []*PaymentDescriptor,
  4707  	[]channeldb.HTLC, error) {
  4708  
  4709  	lc.Lock()
  4710  	defer lc.Unlock()
  4711  
  4712  	// Ensure that the new pre-image can be placed in the preimage store.
  4713  	store := lc.channelState.RevocationStore
  4714  	revocationHash, err := chainhash.NewHash(revMsg.Revocation[:])
  4715  	if err != nil {
  4716  		return nil, nil, nil, nil, err
  4717  	}
  4718  	revocation := (*shachain.ShaHash)(revocationHash)
  4719  	if err := store.AddNextEntry(revocation); err != nil {
  4720  		return nil, nil, nil, nil, err
  4721  	}
  4722  
  4723  	// Verify that the commitment point computed based off of the revealed
  4724  	// secret matches the current commitment point we track for the remote
  4725  	// party, proving they revoked the correct state.
  4726  	currentCommitPoint := lc.channelState.RemoteCurrentRevocation
  4727  	derivedCommitPoint := input.ComputeCommitmentPoint(revMsg.Revocation[:])
  4728  	if !derivedCommitPoint.IsEqual(currentCommitPoint) {
  4729  		return nil, nil, nil, nil, fmt.Errorf("revocation key mismatch")
  4730  	}
  4731  
  4732  	// Now that we've verified that the prior commitment has been properly
  4733  	// revoked, we'll advance the revocation state we track for the remote
  4734  	// party: the new current revocation is what was previously the next
  4735  	// revocation, and the new next revocation is set to the key included
  4736  	// in the message.
  4737  	lc.channelState.RemoteCurrentRevocation = lc.channelState.RemoteNextRevocation
  4738  	lc.channelState.RemoteNextRevocation = revMsg.NextRevocationKey
  4739  
  4740  	lc.log.Tracef("remote party accepted state transition, revoked height "+
  4741  		"%v, now at %v",
  4742  		lc.remoteCommitChain.tail().height,
  4743  		lc.remoteCommitChain.tail().height+1)
  4744  
  4745  	// Add one to the remote tail since this will be the height *after* we
  4746  	// write the revocation to disk; the local height will remain unchanged.
  4747  	remoteChainTail := lc.remoteCommitChain.tail().height + 1
  4748  	localChainTail := lc.localCommitChain.tail().height
  4749  
  4750  	source := lc.ShortChanID()
  4751  	chanID := lnwire.NewChanIDFromOutPoint(&lc.channelState.FundingOutpoint)
  4752  
  4753  	// Determine the set of htlcs that can be forwarded as a result of
  4754  	// having received the revocation. We will simultaneously construct the
  4755  	// log updates and payment descriptors, allowing us to persist the log
  4756  	// updates to disk and optimistically buffer the forwarding package in
  4757  	// memory.
  4758  	var (
  4759  		addsToForward        []*PaymentDescriptor
  4760  		addUpdates           []channeldb.LogUpdate
  4761  		settleFailsToForward []*PaymentDescriptor
  4762  		settleFailUpdates    []channeldb.LogUpdate
  4763  	)
  4764  
  4765  	var addIndex, settleFailIndex uint16
  4766  	for e := lc.remoteUpdateLog.Front(); e != nil; e = e.Next() {
  4767  		pd := e.Value.(*PaymentDescriptor)
  4768  
  4769  		// Fee updates are local to this particular channel, and should
  4770  		// never be forwarded.
  4771  		if pd.EntryType == FeeUpdate {
  4772  			continue
  4773  		}
  4774  
  4775  		if pd.isForwarded {
  4776  			continue
  4777  		}
  4778  
  4779  		// For each type of HTLC, we will only consider forwarding it if
  4780  		// both of the remote and local heights are non-zero. If either
  4781  		// of these values is zero, it has yet to be committed in both
  4782  		// the local and remote chains.
  4783  		committedAdd := pd.addCommitHeightRemote > 0 &&
  4784  			pd.addCommitHeightLocal > 0
  4785  		committedRmv := pd.removeCommitHeightRemote > 0 &&
  4786  			pd.removeCommitHeightLocal > 0
  4787  
  4788  		// Using the height of the remote and local commitments,
  4789  		// preemptively compute whether or not to forward this HTLC for
  4790  		// the case in which this is an Add HTLC, or if this is a
  4791  		// Settle, Fail, or MalformedFail.
  4792  		shouldFwdAdd := remoteChainTail == pd.addCommitHeightRemote &&
  4793  			localChainTail >= pd.addCommitHeightLocal
  4794  		shouldFwdRmv := remoteChainTail == pd.removeCommitHeightRemote &&
  4795  			localChainTail >= pd.removeCommitHeightLocal
  4796  
  4797  		// We'll only forward any new HTLC additions iff they're "freshly
  4798  		// locked in", meaning that the HTLC was only *just* considered
  4799  		// locked-in at this new state. By doing this we ensure that we
  4800  		// don't re-forward any already processed HTLC's after a
  4801  		// restart.
  4802  		switch {
  4803  		case pd.EntryType == Add && committedAdd && shouldFwdAdd:
  4804  			// Construct a reference specifying the location that
  4805  			// this forwarded Add will be written in the forwarding
  4806  			// package constructed at this remote height.
  4807  			pd.SourceRef = &channeldb.AddRef{
  4808  				Height: remoteChainTail,
  4809  				Index:  addIndex,
  4810  			}
  4811  			addIndex++
  4812  
  4813  			pd.isForwarded = true
  4814  			addsToForward = append(addsToForward, pd)
  4815  
  4816  		case pd.EntryType != Add && committedRmv && shouldFwdRmv:
  4817  			// Construct a reference specifying the location that
  4818  			// this forwarded Settle/Fail will be written in the
  4819  			// forwarding package constructed at this remote height.
  4820  			pd.DestRef = &channeldb.SettleFailRef{
  4821  				Source: source,
  4822  				Height: remoteChainTail,
  4823  				Index:  settleFailIndex,
  4824  			}
  4825  			settleFailIndex++
  4826  
  4827  			pd.isForwarded = true
  4828  			settleFailsToForward = append(settleFailsToForward, pd)
  4829  
  4830  		default:
  4831  			continue
  4832  		}
  4833  
  4834  		// If we've reached this point, this HTLC will be added to the
  4835  		// forwarding package at the height of the remote commitment.
  4836  		// All types of HTLCs will record their assigned log index.
  4837  		logUpdate := channeldb.LogUpdate{
  4838  			LogIndex: pd.LogIndex,
  4839  		}
  4840  
  4841  		// Next, we'll map the type of the PaymentDescriptor to one of
  4842  		// the four messages that it corresponds to and separate the
  4843  		// updates into Adds and Settle/Fail/MalformedFail such that
  4844  		// they can be written in the forwarding package. Adds are
  4845  		// aggregated separately from the other types of HTLCs.
  4846  		switch pd.EntryType {
  4847  		case Add:
  4848  			htlc := &lnwire.UpdateAddHTLC{
  4849  				ChanID:      chanID,
  4850  				ID:          pd.HtlcIndex,
  4851  				Amount:      pd.Amount,
  4852  				Expiry:      pd.Timeout,
  4853  				PaymentHash: pd.RHash,
  4854  			}
  4855  			copy(htlc.OnionBlob[:], pd.OnionBlob)
  4856  			logUpdate.UpdateMsg = htlc
  4857  			addUpdates = append(addUpdates, logUpdate)
  4858  
  4859  		case Settle:
  4860  			logUpdate.UpdateMsg = &lnwire.UpdateFulfillHTLC{
  4861  				ChanID:          chanID,
  4862  				ID:              pd.ParentIndex,
  4863  				PaymentPreimage: pd.RPreimage,
  4864  			}
  4865  			settleFailUpdates = append(settleFailUpdates, logUpdate)
  4866  
  4867  		case Fail:
  4868  			logUpdate.UpdateMsg = &lnwire.UpdateFailHTLC{
  4869  				ChanID: chanID,
  4870  				ID:     pd.ParentIndex,
  4871  				Reason: pd.FailReason,
  4872  			}
  4873  			settleFailUpdates = append(settleFailUpdates, logUpdate)
  4874  
  4875  		case MalformedFail:
  4876  			logUpdate.UpdateMsg = &lnwire.UpdateFailMalformedHTLC{
  4877  				ChanID:       chanID,
  4878  				ID:           pd.ParentIndex,
  4879  				ShaOnionBlob: pd.ShaOnionBlob,
  4880  				FailureCode:  pd.FailCode,
  4881  			}
  4882  			settleFailUpdates = append(settleFailUpdates, logUpdate)
  4883  		}
  4884  	}
  4885  
  4886  	// We use the remote commitment chain's tip as it will soon become the tail
  4887  	// once advanceTail is called.
  4888  	remoteMessageIndex := lc.remoteCommitChain.tip().ourMessageIndex
  4889  	localMessageIndex := lc.localCommitChain.tail().ourMessageIndex
  4890  
  4891  	localPeerUpdates := lc.unsignedLocalUpdates(
  4892  		remoteMessageIndex, localMessageIndex, chanID,
  4893  	)
  4894  
  4895  	// Now that we have gathered the set of HTLCs to forward, separated by
  4896  	// type, construct a forwarding package using the height that the remote
  4897  	// commitment chain will be extended after persisting the revocation.
  4898  	fwdPkg := channeldb.NewFwdPkg(
  4899  		source, remoteChainTail, addUpdates, settleFailUpdates,
  4900  	)
  4901  
  4902  	// At this point, the revocation has been accepted, and we've rotated
  4903  	// the current revocation key+hash for the remote party. Therefore we
  4904  	// sync now to ensure the revocation producer state is consistent with
  4905  	// the current commitment height and also to advance the on-disk
  4906  	// commitment chain.
  4907  	err = lc.channelState.AdvanceCommitChainTail(fwdPkg, localPeerUpdates)
  4908  	if err != nil {
  4909  		return nil, nil, nil, nil, err
  4910  	}
  4911  
  4912  	// Since they revoked the current lowest height in their commitment
  4913  	// chain, we can advance their chain by a single commitment.
  4914  	lc.remoteCommitChain.advanceTail()
  4915  
  4916  	// As we've just completed a new state transition, attempt to see if we
  4917  	// can remove any entries from the update log which have been removed
  4918  	// from the PoV of both commitment chains.
  4919  	compactLogs(
  4920  		lc.localUpdateLog, lc.remoteUpdateLog, localChainTail,
  4921  		remoteChainTail,
  4922  	)
  4923  
  4924  	remoteHTLCs := lc.channelState.RemoteCommitment.Htlcs
  4925  
  4926  	return fwdPkg, addsToForward, settleFailsToForward, remoteHTLCs, nil
  4927  }
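
        // A minimal sketch of how a link might consume these results,
        // assuming hypothetical forwardAdds and forwardSettleFails helpers:
        //
        //	fwdPkg, adds, settleFails, _, err := lc.ReceiveRevocation(revMsg)
        //	if err != nil {
        //		return err
        //	}
        //	forwardAdds(fwdPkg, adds)               // freshly locked-in Adds
        //	forwardSettleFails(fwdPkg, settleFails) // Settle/Fail responses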
  4928  
  4929  // LoadFwdPkgs loads any pending log updates from disk and returns the
  4930  // forwarding packages to be processed by the link.
  4931  func (lc *LightningChannel) LoadFwdPkgs() ([]*channeldb.FwdPkg, error) {
  4932  	return lc.channelState.LoadFwdPkgs()
  4933  }
  4934  
  4935  // AckAddHtlcs sets a bit in the FwdFilter of a forwarding package belonging to
  4936  // this channel, that corresponds to the given AddRef. This method also succeeds
  4937  // if no forwarding package is found.
  4938  func (lc *LightningChannel) AckAddHtlcs(addRef channeldb.AddRef) error {
  4939  	return lc.channelState.AckAddHtlcs(addRef)
  4940  }
  4941  
  4942  // AckSettleFails sets a bit in the SettleFailFilter of a forwarding package
  4943  // belonging to this channel, that corresponds to the given SettleFailRef. This
  4944  // method also succeeds if no forwarding package is found.
  4945  func (lc *LightningChannel) AckSettleFails(
  4946  	settleFailRefs ...channeldb.SettleFailRef) error {
  4947  
  4948  	return lc.channelState.AckSettleFails(settleFailRefs...)
  4949  }
  4950  
  4951  // SetFwdFilter writes the forwarding decision for a given remote commitment
  4952  // height.
  4953  func (lc *LightningChannel) SetFwdFilter(height uint64,
  4954  	fwdFilter *channeldb.PkgFilter) error {
  4955  
  4956  	return lc.channelState.SetFwdFilter(height, fwdFilter)
  4957  }
  4958  
  4959  // RemoveFwdPkgs permanently deletes the forwarding packages at the given heights.
  4960  func (lc *LightningChannel) RemoveFwdPkgs(heights ...uint64) error {
  4961  	return lc.channelState.RemoveFwdPkgs(heights...)
  4962  }
  4963  
  4964  // NextRevocationKey returns the commitment point for the _next_ commitment
  4965  // height. The pubkey returned by this function is required by the remote party
  4966  // along with their revocation base to extend our commitment chain with a
  4967  // new commitment.
  4968  func (lc *LightningChannel) NextRevocationKey() (*secp256k1.PublicKey, error) {
  4969  	lc.RLock()
  4970  	defer lc.RUnlock()
  4971  
  4972  	nextHeight := lc.currentHeight + 1
  4973  	revocation, err := lc.channelState.RevocationProducer.AtIndex(nextHeight)
  4974  	if err != nil {
  4975  		return nil, err
  4976  	}
  4977  
  4978  	return input.ComputeCommitmentPoint(revocation[:]), nil
  4979  }
  4980  
  4981  // InitNextRevocation inserts the passed commitment point as the _next_
  4982  // revocation to be used when creating a new commitment state for the remote
  4983  // party. This function MUST be called before the channel can accept or propose
  4984  // any new states.
  4985  func (lc *LightningChannel) InitNextRevocation(revKey *secp256k1.PublicKey) error {
  4986  	lc.Lock()
  4987  	defer lc.Unlock()
  4988  
  4989  	return lc.channelState.InsertNextRevocation(revKey)
  4990  }
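
        // A minimal sketch of the bootstrap these two methods enable at
        // funding time, assuming the remote's first commitment point arrives
        // in a hypothetical remoteFirstPoint variable:
        //
        //	ourFirstPoint, err := lc.NextRevocationKey()
        //	if err != nil {
        //		return err
        //	}
        //	// Exchange ourFirstPoint for the remote's point, then:
        //	if err := lc.InitNextRevocation(remoteFirstPoint); err != nil {
        //		return err
        //	}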
  4991  
  4992  // AddHTLC adds an HTLC to the state machine's local update log. This method
  4993  // should be called when preparing to send an outgoing HTLC.
  4994  //
  4995  // The additional openKey argument corresponds to the incoming CircuitKey of the
  4996  // committed circuit for this HTLC. This value should never be nil.
  4997  //
  4998  // Note that AddHTLC doesn't reserve the HTLC fee for future payment (like
  4999  // AvailableBalance does), so one could get into the "stuck channel" state by
  5000  // sending dust HTLCs.
  5001  // TODO(halseth): fix this either by using additional reserve, or better commit
  5002  // format. See https://github.com/lightningnetwork/lightning-rfc/issues/728
  5003  //
  5004  // NOTE: It is okay for openKey to be nil when unit testing the wallet.
  5005  func (lc *LightningChannel) AddHTLC(htlc *lnwire.UpdateAddHTLC,
  5006  	openKey *channeldb.CircuitKey) (uint64, error) {
  5007  
  5008  	lc.Lock()
  5009  	defer lc.Unlock()
  5010  
  5011  	pd := lc.htlcAddDescriptor(htlc, openKey)
  5012  	if err := lc.validateAddHtlc(pd); err != nil {
  5013  		return 0, err
  5014  	}
  5015  
  5016  	lc.localUpdateLog.appendHtlc(pd)
  5017  
  5018  	return pd.HtlcIndex, nil
  5019  }
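
        // A minimal sketch of queueing an outgoing HTLC, assuming chanID,
        // hash, amt, expiry, onion and openKey are supplied by the caller:
        //
        //	add := &lnwire.UpdateAddHTLC{
        //		ChanID:      chanID,
        //		PaymentHash: hash,
        //		Amount:      amt,
        //		Expiry:      expiry,
        //		OnionBlob:   onion,
        //	}
        //	idx, err := lc.AddHTLC(add, openKey)
        //	if err != nil {
        //		return err // e.g. ErrMaxHTLCNumber or ErrBelowChanReserve
        //	}
        //	add.ID = idx // the HTLC index doubles as the wire-level ID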
  5020  
  5021  // GetDustSum takes in a boolean that determines which commitment to evaluate
  5022  // the dust sum on. The return value is the sum of dust on the desired
  5023  // commitment tx.
  5024  //
  5025  // NOTE: This over-estimates the dust exposure.
  5026  func (lc *LightningChannel) GetDustSum(remote bool) lnwire.MilliAtom {
  5027  	lc.RLock()
  5028  	defer lc.RUnlock()
  5029  
  5030  	var dustSum lnwire.MilliAtom
  5031  
  5032  	dustLimit := lc.channelState.LocalChanCfg.DustLimit
  5033  	commit := lc.channelState.LocalCommitment
  5034  	if remote {
  5035  		// Calculate dust sum on the remote's commitment.
  5036  		dustLimit = lc.channelState.RemoteChanCfg.DustLimit
  5037  		commit = lc.channelState.RemoteCommitment
  5038  	}
  5039  
  5040  	chanType := lc.channelState.ChanType
  5041  	feeRate := chainfee.AtomPerKByte(commit.FeePerKB)
  5042  
  5043  	// Grab all of our HTLCs and evaluate against the dust limit.
  5044  	for e := lc.localUpdateLog.Front(); e != nil; e = e.Next() {
  5045  		pd := e.Value.(*PaymentDescriptor)
  5046  		if pd.EntryType != Add {
  5047  			continue
  5048  		}
  5049  
  5050  		amt := pd.Amount.ToAtoms()
  5051  
  5052  		// If the atom amount is under the dust limit, add the
  5053  		// MilliAtom amount to the dust sum.
  5054  		if HtlcIsDust(
  5055  			chanType, false, !remote, feeRate, amt, dustLimit,
  5056  		) {
  5057  			dustSum += pd.Amount
  5058  		}
  5059  	}
  5060  
  5061  	// Grab all of their HTLCs and evaluate against the dust limit.
  5062  	for e := lc.remoteUpdateLog.Front(); e != nil; e = e.Next() {
  5063  		pd := e.Value.(*PaymentDescriptor)
  5064  		if pd.EntryType != Add {
  5065  			continue
  5066  		}
  5067  
  5068  		amt := pd.Amount.ToAtoms()
  5069  
  5070  		// If the atom amount is under the dust limit, add the
  5071  		// MilliAtom amount to the dust sum.
  5072  		if HtlcIsDust(
  5073  			chanType, true, !remote, feeRate, amt, dustLimit,
  5074  		) {
  5075  			dustSum += pd.Amount
  5076  		}
  5077  	}
  5078  
  5079  	return dustSum
  5080  }
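
        // As a worked example: an Add of 10,000 atoms against a 12,000-atom
        // dust limit produces no output on the commitment transaction, so its
        // full 10,000,000 MilliAtom value counts towards the dust sum,
        // reflecting that a force close would see that value absorbed by
        // fees.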
  5081  
  5082  // MayAddOutgoingHtlc validates whether we can add an outgoing htlc to this
  5083  // channel. We don't have a circuit for this htlc, because we just want to test
  5084  // that we have slots for a potential htlc so we use a "mock" htlc to validate
  5085  // a potential commitment state with one more outgoing htlc. If a zero htlc
  5086  // amount is provided, we'll attempt to add the smallest possible htlc to the
  5087  // channel (either the minimum htlc, or 1 MilliAtom).
  5088  func (lc *LightningChannel) MayAddOutgoingHtlc(amt lnwire.MilliAtom) error {
  5089  	lc.Lock()
  5090  	defer lc.Unlock()
  5091  
  5092  	var mockHtlcAmt lnwire.MilliAtom
  5093  	switch {
  5094  	// If the caller specifically set an amount, we use it.
  5095  	case amt != 0:
  5096  		mockHtlcAmt = amt
  5097  
  5098  	// In the absence of a specific amount, we want to use the minimum htlc
  5099  	// value for the channel. However, certain implementations may set this
  5100  	// value to zero, so we only use this value if it is non-zero.
  5101  	case lc.channelState.LocalChanCfg.MinHTLC != 0:
  5102  		mockHtlcAmt = lc.channelState.LocalChanCfg.MinHTLC
  5103  
  5104  	// As a last resort, we just add a non-zero amount.
  5105  	default:
  5106  		mockHtlcAmt++
  5107  	}
  5108  
  5109  	// Create a "mock" outgoing htlc, using the smallest amount we can add
  5110  	// to the commitment so that we validate commitment slots rather than
  5111  	// available balance, since our actual htlc amount is unknown at this
  5112  	// stage.
  5113  	pd := lc.htlcAddDescriptor(
  5114  		&lnwire.UpdateAddHTLC{
  5115  			Amount: mockHtlcAmt,
  5116  		},
  5117  		&channeldb.CircuitKey{},
  5118  	)
  5119  
  5120  	if err := lc.validateAddHtlc(pd); err != nil {
  5121  		lc.log.Debugf("May add outgoing htlc rejected: %v", err)
  5122  		return err
  5123  	}
  5124  
  5125  	return nil
  5126  }
  5127  
  5128  // htlcAddDescriptor returns a payment descriptor for the htlc and open key
  5129  // provided to add to our local update log.
  5130  func (lc *LightningChannel) htlcAddDescriptor(htlc *lnwire.UpdateAddHTLC,
  5131  	openKey *channeldb.CircuitKey) *PaymentDescriptor {
  5132  
  5133  	return &PaymentDescriptor{
  5134  		EntryType:      Add,
  5135  		RHash:          PaymentHash(htlc.PaymentHash),
  5136  		Timeout:        htlc.Expiry,
  5137  		Amount:         htlc.Amount,
  5138  		LogIndex:       lc.localUpdateLog.logIndex,
  5139  		HtlcIndex:      lc.localUpdateLog.htlcCounter,
  5140  		OnionBlob:      htlc.OnionBlob[:],
  5141  		OpenCircuitKey: openKey,
  5142  	}
  5143  }
  5144  
  5145  // validateAddHtlc validates the addition of an outgoing htlc to our local and
  5146  // remote commitments.
  5147  func (lc *LightningChannel) validateAddHtlc(pd *PaymentDescriptor) error {
  5148  	// Make sure adding this HTLC won't violate any of the constraints we
  5149  	// must keep on the commitment transactions.
  5150  	remoteACKedIndex := lc.localCommitChain.tail().theirMessageIndex
  5151  
  5152  	// First we'll check whether this HTLC can be added to the remote
  5153  	// commitment transaction without violating any of the constraints.
  5154  	err := lc.validateCommitmentSanity(
  5155  		remoteACKedIndex, lc.localUpdateLog.logIndex, true, pd, nil,
  5156  	)
  5157  	if err != nil {
  5158  		return err
  5159  	}
  5160  
  5161  	// We must also check whether it can be added to our own commitment
  5162  	// transaction, or the remote node will refuse to sign. This is not
  5163  	// totally bulletproof, as the remote might be adding updates
  5164  	// concurrently, but if we fail this check it is for sure not possible
  5165  	// for us to add the HTLC.
  5166  	err = lc.validateCommitmentSanity(
  5167  		lc.remoteUpdateLog.logIndex, lc.localUpdateLog.logIndex,
  5168  		false, pd, nil,
  5169  	)
  5170  	if err != nil {
  5171  		return err
  5172  	}
  5173  
  5174  	return nil
  5175  }
  5176  
  5177  // ReceiveHTLC adds an HTLC to the state machine's remote update log. This
  5178  // method should be called in response to receiving a new HTLC from the remote
  5179  // party.
  5180  func (lc *LightningChannel) ReceiveHTLC(htlc *lnwire.UpdateAddHTLC) (uint64, error) {
  5181  	lc.Lock()
  5182  	defer lc.Unlock()
  5183  
  5184  	if htlc.ID != lc.remoteUpdateLog.htlcCounter {
  5185  		return 0, fmt.Errorf("ID %d on HTLC add does not match expected next "+
  5186  			"ID %d", htlc.ID, lc.remoteUpdateLog.htlcCounter)
  5187  	}
  5188  
  5189  	pd := &PaymentDescriptor{
  5190  		EntryType: Add,
  5191  		RHash:     PaymentHash(htlc.PaymentHash),
  5192  		Timeout:   htlc.Expiry,
  5193  		Amount:    htlc.Amount,
  5194  		LogIndex:  lc.remoteUpdateLog.logIndex,
  5195  		HtlcIndex: lc.remoteUpdateLog.htlcCounter,
  5196  		OnionBlob: htlc.OnionBlob[:],
  5197  	}
  5198  
  5199  	localACKedIndex := lc.remoteCommitChain.tail().ourMessageIndex
  5200  
  5201  	// Clamp down on the number of HTLC's we can receive by checking the
  5202  	// commitment sanity.
  5203  	err := lc.validateCommitmentSanity(
  5204  		lc.remoteUpdateLog.logIndex, localACKedIndex, false, nil, pd,
  5205  	)
  5206  	if err != nil {
  5207  		return 0, err
  5208  	}
  5209  
  5210  	lc.remoteUpdateLog.appendHtlc(pd)
  5211  
  5212  	return pd.HtlcIndex, nil
  5213  }
  5214  
  5215  // SettleHTLC attempts to settle an existing outstanding received HTLC. The
  5216  // HTLC to settle is identified by its index into the remote update log; the
  5217  // same index is used as the ID when creating the corresponding wire message.
  5218  // In the case the supplied preimage is invalid, an error is returned.
  5219  //
  5220  // The additional arguments correspond to:
  5221  //
  5222  //   - sourceRef: specifies the location of the Add HTLC within a forwarding
  5223  //     package that this HTLC is settling. Every Settle settles exactly one
  5224  //     Add, so this should never be empty in practice.
  5225  //
  5226  //   - destRef: specifies the location of the Settle HTLC within another
  5227  //     channel's forwarding package. This value can be nil if the corresponding
  5228  //     Add HTLC was never locked into an outgoing commitment txn, or this
  5229  //     HTLC does not originate as a response from the peer on the outgoing
  5230  //     link, e.g. on-chain resolutions.
  5231  //
  5232  //   - closeKey: identifies the circuit that should be deleted after this Settle
  5233  //     HTLC is included in a commitment txn. This value should only be nil if
  5234  //     the HTLC was settled locally before committing a circuit to the circuit
  5235  //     map.
  5236  //
  5237  // NOTE: It is okay for sourceRef, destRef, and closeKey to be nil when unit
  5238  // testing the wallet.
  5239  func (lc *LightningChannel) SettleHTLC(preimage lntypes.Preimage,
  5240  	htlcIndex uint64, sourceRef *channeldb.AddRef,
  5241  	destRef *channeldb.SettleFailRef, closeKey *channeldb.CircuitKey) error {
  5242  
  5243  	lc.Lock()
  5244  	defer lc.Unlock()
  5245  
  5246  	htlc := lc.remoteUpdateLog.lookupHtlc(htlcIndex)
  5247  	if htlc == nil {
  5248  		return ErrUnknownHtlcIndex{lc.ShortChanID(), htlcIndex}
  5249  	}
  5250  
  5251  	// Now that we know the HTLC exists, before checking to see if the
  5252  	// preimage matches, we'll ensure that we haven't already attempted to
  5253  	// modify the HTLC.
  5254  	if lc.remoteUpdateLog.htlcHasModification(htlcIndex) {
  5255  		return ErrHtlcIndexAlreadySettled(htlcIndex)
  5256  	}
  5257  
  5258  	if htlc.RHash != PaymentHash(preimage.Hash()) {
  5259  		return ErrInvalidSettlePreimage{preimage[:], htlc.RHash[:]}
  5260  	}
  5261  
  5262  	pd := &PaymentDescriptor{
  5263  		Amount:           htlc.Amount,
  5264  		RPreimage:        preimage,
  5265  		LogIndex:         lc.localUpdateLog.logIndex,
  5266  		ParentIndex:      htlcIndex,
  5267  		EntryType:        Settle,
  5268  		SourceRef:        sourceRef,
  5269  		DestRef:          destRef,
  5270  		ClosedCircuitKey: closeKey,
  5271  	}
  5272  
  5273  	lc.localUpdateLog.appendUpdate(pd)
  5274  
  5275  	// With the settle added to our local log, we'll now mark the HTLC as
  5276  	// modified to prevent ourselves from accidentally attempting a
  5277  	// duplicate settle.
  5278  	lc.remoteUpdateLog.markHtlcModified(htlcIndex)
  5279  
  5280  	return nil
  5281  }
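
        // An illustrative sketch (not part of the original file): settling a
        // received HTLC once its preimage is learned. As the NOTE above points
        // out, the three reference arguments may be nil in unit tests; preimage
        // and htlcIndex are hypothetical values.
        //
        //	if err := lc.SettleHTLC(preimage, htlcIndex, nil, nil, nil); err != nil {
        //		// Unknown index, duplicate settle, or preimage mismatch.
        //		return err
        //	}
        //	// The settle takes effect once signed into a new commitment.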
  5282  
  5283  // ReceiveHTLCSettle attempts to settle an existing outgoing HTLC indexed by an
  5284  // index into the local log. If the specified index doesn't exist within the
  5285  // log, an error is returned. Similarly, if the preimage is invalid w.r.t.
  5286  // the referenced HTLC, then a distinct error is returned.
  5287  func (lc *LightningChannel) ReceiveHTLCSettle(preimage lntypes.Preimage, htlcIndex uint64) error {
  5288  	lc.Lock()
  5289  	defer lc.Unlock()
  5290  
  5291  	htlc := lc.localUpdateLog.lookupHtlc(htlcIndex)
  5292  	if htlc == nil {
  5293  		return ErrUnknownHtlcIndex{lc.ShortChanID(), htlcIndex}
  5294  	}
  5295  
  5296  	// Now that we know the HTLC exists, before checking to see if the
  5297  	// preimage matches, we'll ensure that they haven't already attempted
  5298  	// to modify the HTLC.
  5299  	if lc.localUpdateLog.htlcHasModification(htlcIndex) {
  5300  		return ErrHtlcIndexAlreadySettled(htlcIndex)
  5301  	}
  5302  
  5303  	if htlc.RHash != PaymentHash(preimage.Hash()) {
  5304  		return ErrInvalidSettlePreimage{preimage[:], htlc.RHash[:]}
  5305  	}
  5306  
  5307  	pd := &PaymentDescriptor{
  5308  		Amount:      htlc.Amount,
  5309  		RPreimage:   preimage,
  5310  		ParentIndex: htlc.HtlcIndex,
  5311  		RHash:       htlc.RHash,
  5312  		LogIndex:    lc.remoteUpdateLog.logIndex,
  5313  		EntryType:   Settle,
  5314  	}
  5315  
  5316  	lc.remoteUpdateLog.appendUpdate(pd)
  5317  
  5318  	// With the settle added to the remote log, we'll now mark the HTLC as
  5319  	// modified to prevent the remote party from accidentally attempting a
  5320  	// duplicate settle.
  5321  	lc.localUpdateLog.markHtlcModified(htlcIndex)
  5322  
  5323  	return nil
  5324  }
  5325  
  5326  // FailHTLC attempts to fail a targeted HTLC by its payment hash, inserting an
  5327  // entry which will remove the target log entry within the next commitment
  5328  // update. This method is intended to be called in order to cancel an
  5329  // _incoming_ HTLC.
  5330  //
  5331  // The additional arguments correspond to:
  5332  //
  5333  //   - sourceRef: specifies the location of the Add HTLC within a forwarding
  5334  //     package that this HTLC is failing. Every Fail fails exactly one Add, so
  5335  //     this should never be empty in practice.
  5336  //
  5337  //   - destRef: specifies the location of the Fail HTLC within another channel's
  5338  //     forwarding package. This value can be nil if the corresponding Add HTLC
  5339  //     was never locked into an outgoing commitment txn, or this HTLC does not
  5340  //     originate as a response from the peer on the outgoing link, e.g.
  5341  //     on-chain resolutions.
  5342  //
  5343  //   - closeKey: identifies the circuit that should be deleted after this Fail
  5344  //     HTLC is included in a commitment txn. This value should only be nil if
  5345  //     the HTLC was failed locally before committing a circuit to the circuit
  5346  //     map.
  5347  //
  5348  // NOTE: It is okay for sourceRef, destRef, and closeKey to be nil when unit
  5349  // testing the wallet.
  5350  func (lc *LightningChannel) FailHTLC(htlcIndex uint64, reason []byte,
  5351  	sourceRef *channeldb.AddRef, destRef *channeldb.SettleFailRef,
  5352  	closeKey *channeldb.CircuitKey) error {
  5353  
  5354  	lc.Lock()
  5355  	defer lc.Unlock()
  5356  
  5357  	htlc := lc.remoteUpdateLog.lookupHtlc(htlcIndex)
  5358  	if htlc == nil {
  5359  		return ErrUnknownHtlcIndex{lc.ShortChanID(), htlcIndex}
  5360  	}
  5361  
  5362  	// Now that we know the HTLC exists, we'll ensure that we haven't
  5363  	// already attempted to fail the HTLC.
  5364  	if lc.remoteUpdateLog.htlcHasModification(htlcIndex) {
  5365  		return ErrHtlcIndexAlreadyFailed(htlcIndex)
  5366  	}
  5367  
  5368  	pd := &PaymentDescriptor{
  5369  		Amount:           htlc.Amount,
  5370  		RHash:            htlc.RHash,
  5371  		ParentIndex:      htlcIndex,
  5372  		LogIndex:         lc.localUpdateLog.logIndex,
  5373  		EntryType:        Fail,
  5374  		FailReason:       reason,
  5375  		SourceRef:        sourceRef,
  5376  		DestRef:          destRef,
  5377  		ClosedCircuitKey: closeKey,
  5378  	}
  5379  
  5380  	lc.localUpdateLog.appendUpdate(pd)
  5381  
  5382  	// With the fail added to our local log, we'll now mark the HTLC as
  5383  	// modified to prevent ourselves from accidentally attempting a
  5384  	// duplicate fail.
  5385  	lc.remoteUpdateLog.markHtlcModified(htlcIndex)
  5386  
  5387  	return nil
  5388  }
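
        // An illustrative sketch (not part of the original file): failing an
        // incoming HTLC, e.g. after the downstream link returned a failure.
        // reason carries the encrypted failure destined for the upstream peer;
        // htlcIndex and reason are hypothetical values.
        //
        //	if err := lc.FailHTLC(htlcIndex, reason, nil, nil, nil); err != nil {
        //		// Unknown index, or the HTLC was already settled/failed.
        //		return err
        //	}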
  5389  
  5390  // MalformedFailHTLC attempts to fail a targeted HTLC by its payment hash,
  5391  // inserting an entry which will remove the target log entry within the next
  5392  // commitment update. This method is intended to be called in order to cancel
  5393  // an _incoming_ HTLC.
  5394  //
  5395  // The additional sourceRef specifies the location of the Add HTLC within a
  5396  // forwarding package that this HTLC is failing. This value should never be
  5397  // empty.
  5398  //
  5399  // NOTE: It is okay for sourceRef to be nil when unit testing the wallet.
  5400  func (lc *LightningChannel) MalformedFailHTLC(htlcIndex uint64,
  5401  	failCode lnwire.FailCode, shaOnionBlob [sha256.Size]byte,
  5402  	sourceRef *channeldb.AddRef) error {
  5403  
  5404  	lc.Lock()
  5405  	defer lc.Unlock()
  5406  
  5407  	htlc := lc.remoteUpdateLog.lookupHtlc(htlcIndex)
  5408  	if htlc == nil {
  5409  		return ErrUnknownHtlcIndex{lc.ShortChanID(), htlcIndex}
  5410  	}
  5411  
  5412  	// Now that we know the HTLC exists, we'll ensure that we haven't
  5413  	// already attempted to fail the HTLC.
  5414  	if lc.remoteUpdateLog.htlcHasModification(htlcIndex) {
  5415  		return ErrHtlcIndexAlreadyFailed(htlcIndex)
  5416  	}
  5417  
  5418  	pd := &PaymentDescriptor{
  5419  		Amount:       htlc.Amount,
  5420  		RHash:        htlc.RHash,
  5421  		ParentIndex:  htlcIndex,
  5422  		LogIndex:     lc.localUpdateLog.logIndex,
  5423  		EntryType:    MalformedFail,
  5424  		FailCode:     failCode,
  5425  		ShaOnionBlob: shaOnionBlob,
  5426  		SourceRef:    sourceRef,
  5427  	}
  5428  
  5429  	lc.localUpdateLog.appendUpdate(pd)
  5430  
  5431  	// With the fail added to our local log, we'll now mark the HTLC as
  5432  	// modified to prevent ourselves from accidentally attempting a
  5433  	// duplicate fail.
  5434  	lc.remoteUpdateLog.markHtlcModified(htlcIndex)
  5435  
  5436  	return nil
  5437  }
  5438  
  5439  // ReceiveFailHTLC attempts to cancel a targeted HTLC by its log index,
  5440  // inserting an entry which will remove the target log entry within the next
  5441  // commitment update. This method should be called in response to the upstream
  5442  // party cancelling an outgoing HTLC. An error is returned if the specified
  5443  // HTLC index is unknown or the HTLC has already been modified.
  5444  func (lc *LightningChannel) ReceiveFailHTLC(htlcIndex uint64, reason []byte,
  5445  ) error {
  5446  
  5447  	lc.Lock()
  5448  	defer lc.Unlock()
  5449  
  5450  	htlc := lc.localUpdateLog.lookupHtlc(htlcIndex)
  5451  	if htlc == nil {
  5452  		return ErrUnknownHtlcIndex{lc.ShortChanID(), htlcIndex}
  5453  	}
  5454  
  5455  	// Now that we know the HTLC exists, we'll ensure that they haven't
  5456  	// already attempted to fail the HTLC.
  5457  	if lc.localUpdateLog.htlcHasModification(htlcIndex) {
  5458  		return ErrHtlcIndexAlreadyFailed(htlcIndex)
  5459  	}
  5460  
  5461  	pd := &PaymentDescriptor{
  5462  		Amount:      htlc.Amount,
  5463  		RHash:       htlc.RHash,
  5464  		ParentIndex: htlc.HtlcIndex,
  5465  		LogIndex:    lc.remoteUpdateLog.logIndex,
  5466  		EntryType:   Fail,
  5467  		FailReason:  reason,
  5468  	}
  5469  
  5470  	lc.remoteUpdateLog.appendUpdate(pd)
  5471  
  5472  	// With the fail added to the remote log, we'll now mark the HTLC as
  5473  	// modified to prevent the remote party from accidentally attempting a
  5474  	// duplicate fail.
  5475  	lc.localUpdateLog.markHtlcModified(htlcIndex)
  5476  
  5477  	return nil
  5478  }
  5479  
  5480  // ChannelPoint returns the outpoint of the original funding transaction which
  5481  // created this active channel. This outpoint is used throughout various
  5482  // subsystems to uniquely identify an open channel.
  5483  func (lc *LightningChannel) ChannelPoint() *wire.OutPoint {
  5484  	return &lc.channelState.FundingOutpoint
  5485  }
  5486  
  5487  // ShortChanID returns the short channel ID for the channel. The short channel
  5488  // ID encodes the exact location in the main chain that the original
  5489  // funding output can be found.
  5490  func (lc *LightningChannel) ShortChanID() lnwire.ShortChannelID {
  5491  	return lc.channelState.ShortChanID()
  5492  }
  5493  
  5494  // LocalUpfrontShutdownScript returns the local upfront shutdown script for the
  5495  // channel. If it was not set, an empty byte array is returned.
  5496  func (lc *LightningChannel) LocalUpfrontShutdownScript() lnwire.DeliveryAddress {
  5497  	return lc.channelState.LocalShutdownScript
  5498  }
  5499  
  5500  // RemoteUpfrontShutdownScript returns the remote upfront shutdown script for the
  5501  // channel. If it was not set, an empty byte array is returned.
  5502  func (lc *LightningChannel) RemoteUpfrontShutdownScript() lnwire.DeliveryAddress {
  5503  	return lc.channelState.RemoteShutdownScript
  5504  }
  5505  
  5506  // getSignedCommitTx takes the latest commitment transaction and populates
  5507  // it with witness data.
  5508  func (lc *LightningChannel) getSignedCommitTx() (*wire.MsgTx, error) {
  5509  	// Fetch the current commitment transaction, along with their signature
  5510  	// for the transaction.
  5511  	localCommit := lc.channelState.LocalCommitment
  5512  	commitTx := localCommit.CommitTx.Copy()
  5513  
  5514  	theirSig, err := ecdsa.ParseDERSignature(
  5515  		localCommit.CommitSig,
  5516  	)
  5517  	if err != nil {
  5518  		return nil, err
  5519  	}
  5520  
  5521  	// With this, we then generate the full witness so the caller can
  5522  	// broadcast a fully signed transaction.
  5523  	ourSig, err := lc.Signer.SignOutputRaw(commitTx, lc.signDesc)
  5524  	if err != nil {
  5525  		return nil, err
  5526  	}
  5527  
  5528  	// With the final signature generated, create the witness stack
  5529  	// required to spend from the multi-sig output.
  5530  	ourKey := lc.channelState.LocalChanCfg.MultiSigKey.PubKey.
  5531  		SerializeCompressed()
  5532  	theirKey := lc.channelState.RemoteChanCfg.MultiSigKey.PubKey.
  5533  		SerializeCompressed()
  5534  
  5535  	witness := input.SpendMultiSig(
  5536  		lc.signDesc.WitnessScript, ourKey,
  5537  		ourSig, theirKey, theirSig,
  5538  	)
  5539  
  5540  	// Finally, convert the witness stack to a signature script and set it.
  5541  	sigScript, err := input.WitnessStackToSigScript(witness)
  5542  	if err != nil {
  5543  		return nil, err
  5544  	}
  5545  	commitTx.TxIn[0].SignatureScript = sigScript
  5546  
  5547  	return commitTx, nil
  5548  }
  5549  
  5550  // CommitOutputResolution carries the necessary information required to allow
  5551  // us to sweep our commitment output in the case that either party goes to
  5552  // chain.
  5553  type CommitOutputResolution struct {
  5554  	// SelfOutPoint is the full outpoint that points to our pay-to-self
  5555  	// output within the closing commitment transaction.
  5556  	SelfOutPoint wire.OutPoint
  5557  
  5558  	// SelfOutputSignDesc is a fully populated sign descriptor capable of
  5559  	// generating a valid signature to sweep the output paying to us.
  5560  	SelfOutputSignDesc input.SignDescriptor
  5561  
  5562  	// MaturityDelay is the relative time-lock, in blocks for all outputs
  5563  	// that pay to the local party within the broadcast commitment
  5564  	// transaction.
  5565  	MaturityDelay uint32
  5566  }
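
        // An illustrative sketch (not part of the original file): how a caller
        // might act on a CommitOutputResolution, waiting out MaturityDelay
        // before spending SelfOutPoint. summary, confHeight, bestHeight and
        // sweepOutput are hypothetical names.
        //
        //	if res := summary.CommitResolution; res != nil {
        //		if bestHeight >= confHeight+res.MaturityDelay {
        //			sweepOutput(res.SelfOutPoint, &res.SelfOutputSignDesc)
        //		}
        //	}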
  5567  
  5568  // UnilateralCloseSummary describes the details of a detected unilateral
  5569  // channel closure. This includes information about the transaction and block
  5570  // in which the channel was unilaterally closed, as well as summarization
  5571  // details concerning the _state_ of the channel at the point of channel
  5572  // closure. Additionally, if we had a commitment output above dust on the
  5573  // remote party's commitment transaction, an input.SignDescriptor with the
  5574  // material necessary to sweep the output is returned. Finally, if we had any
  5575  // outgoing HTLC's within the commitment transaction, then an
  5576  // OutgoingHtlcResolution for each output will be included.
  5577  type UnilateralCloseSummary struct {
  5578  	// SpendDetail is a struct that describes how and when the funding
  5579  	// output was spent.
  5580  	*chainntnfs.SpendDetail
  5581  
  5582  	// ChannelCloseSummary is a struct describing the final state of the
  5583  	// channel and in which state it was closed.
  5584  	channeldb.ChannelCloseSummary
  5585  
  5586  	// CommitResolution contains all the data required to sweep the output
  5587  	// to ourselves. If this is our commitment transaction, then we'll need
  5588  	// to wait a time delay before we can sweep the output.
  5589  	//
  5590  	// NOTE: If our commitment delivery output is below the dust limit,
  5591  	// then this will be nil.
  5592  	CommitResolution *CommitOutputResolution
  5593  
  5594  	// HtlcResolutions contains a fully populated HtlcResolutions struct
  5595  	// which contains all the data required to sweep any outgoing HTLC's,
  5596  	// and also any incoming HTLC's that we know the pre-image to.
  5597  	HtlcResolutions *HtlcResolutions
  5598  
  5599  	// RemoteCommit is the exact commitment state that the remote party
  5600  	// broadcast.
  5601  	RemoteCommit channeldb.ChannelCommitment
  5602  
  5603  	// AnchorResolution contains the data required to sweep our anchor
  5604  	// output. If the channel type doesn't include anchors, the value of
  5605  	// this field will be nil.
  5606  	AnchorResolution *AnchorResolution
  5607  }
  5608  
  5609  // NewUnilateralCloseSummary creates a new summary that provides the caller
  5610  // with all the information required to claim all funds on chain in the event
  5611  // that the remote party broadcasts their commitment. The commitPoint argument
  5612  // should be set to the per_commitment_point corresponding to the spending
  5613  // commitment.
  5614  //
  5615  // NOTE: The remoteCommit argument should be set to the stored commitment for
  5616  // this particular state. If we don't have the commitment stored (should only
  5617  // happen in case we have lost state) it should be set to an empty struct, in
  5618  // which case we will attempt to sweep the non-HTLC output using the passed
  5619  // commitPoint.
  5620  func NewUnilateralCloseSummary(chanState *channeldb.OpenChannel, signer input.Signer,
  5621  	commitSpend *chainntnfs.SpendDetail,
  5622  	remoteCommit channeldb.ChannelCommitment,
  5623  	commitPoint *secp256k1.PublicKey) (*UnilateralCloseSummary, error) {
  5624  
  5625  	// First, we'll generate the commitment point and the revocation point
  5626  	// so we can re-construct the HTLC state and also our payment key.
  5627  	isOurCommit := false
  5628  	keyRing := DeriveCommitmentKeys(
  5629  		commitPoint, isOurCommit, chanState.ChanType,
  5630  		&chanState.LocalChanCfg, &chanState.RemoteChanCfg,
  5631  	)
  5632  
  5633  	// Next, we'll obtain HTLC resolutions for all the outgoing HTLC's we
  5634  	// had on their commitment transaction.
  5635  	var leaseExpiry uint32
  5636  	if chanState.ChanType.HasLeaseExpiration() {
  5637  		leaseExpiry = chanState.ThawHeight
  5638  	}
  5639  	isRemoteInitiator := !chanState.IsInitiator
  5640  	htlcResolutions, err := extractHtlcResolutions(
  5641  		chainfee.AtomPerKByte(remoteCommit.FeePerKB), isOurCommit,
  5642  		signer, remoteCommit.Htlcs, keyRing, &chanState.LocalChanCfg,
  5643  		&chanState.RemoteChanCfg, commitSpend.SpendingTx,
  5644  		chanState.ChanType, isRemoteInitiator, leaseExpiry,
  5645  	)
  5646  	if err != nil {
  5647  		return nil, fmt.Errorf("unable to create htlc "+
  5648  			"resolutions: %v", err)
  5649  	}
  5650  
  5651  	commitTxBroadcast := commitSpend.SpendingTx
  5652  
  5653  	// Before we can generate the proper sign descriptor, we'll need to
  5654  	// locate the output index of our non-delayed output on the commitment
  5655  	// transaction.
  5656  	selfScript, maturityDelay, err := CommitScriptToRemote(
  5657  		chanState.ChanType, isRemoteInitiator, keyRing.ToRemoteKey,
  5658  		leaseExpiry,
  5659  	)
  5660  	if err != nil {
  5661  		return nil, fmt.Errorf("unable to create self commit "+
  5662  			"script: %v", err)
  5663  	}
  5664  
  5665  	var (
  5666  		selfPoint    *wire.OutPoint
  5667  		localBalance int64
  5668  	)
  5669  
  5670  	for outputIndex, txOut := range commitTxBroadcast.TxOut {
  5671  		if bytes.Equal(txOut.PkScript, selfScript.PkScript) {
  5672  			selfPoint = &wire.OutPoint{
  5673  				Hash:  *commitSpend.SpenderTxHash,
  5674  				Index: uint32(outputIndex),
  5675  			}
  5676  			localBalance = txOut.Value
  5677  			break
  5678  		}
  5679  	}
  5680  
  5681  	// With the HTLC's taken care of, we'll generate the sign descriptor
  5682  	// necessary to sweep our commitment output, but only if we had a
  5683  	// non-trimmed balance.
  5684  	var commitResolution *CommitOutputResolution
  5685  	if selfPoint != nil {
  5686  		localPayBase := chanState.LocalChanCfg.PaymentBasePoint
  5687  		commitResolution = &CommitOutputResolution{
  5688  			SelfOutPoint: *selfPoint,
  5689  			SelfOutputSignDesc: input.SignDescriptor{
  5690  				KeyDesc:       localPayBase,
  5691  				SingleTweak:   keyRing.LocalCommitKeyTweak,
  5692  				WitnessScript: selfScript.WitnessScript,
  5693  				Output: &wire.TxOut{
  5694  					Value:    localBalance,
  5695  					PkScript: selfScript.PkScript,
  5696  				},
  5697  				HashType: txscript.SigHashAll,
  5698  			},
  5699  			MaturityDelay: maturityDelay,
  5700  		}
  5701  	}
  5702  
  5703  	closeSummary := channeldb.ChannelCloseSummary{
  5704  		ChanPoint:               chanState.FundingOutpoint,
  5705  		ChainHash:               chanState.ChainHash,
  5706  		ClosingTXID:             *commitSpend.SpenderTxHash,
  5707  		CloseHeight:             uint32(commitSpend.SpendingHeight),
  5708  		RemotePub:               chanState.IdentityPub,
  5709  		Capacity:                chanState.Capacity,
  5710  		SettledBalance:          dcrutil.Amount(localBalance),
  5711  		CloseType:               channeldb.RemoteForceClose,
  5712  		IsPending:               true,
  5713  		RemoteCurrentRevocation: chanState.RemoteCurrentRevocation,
  5714  		RemoteNextRevocation:    chanState.RemoteNextRevocation,
  5715  		ShortChanID:             chanState.ShortChanID(),
  5716  		LocalChanConfig:         chanState.LocalChanCfg,
  5717  	}
  5718  
  5719  	// Attempt to add a channel sync message to the close summary.
  5720  	chanSync, err := chanState.ChanSyncMsg()
  5721  	if err != nil {
  5722  		walletLog.Errorf("ChannelPoint(%v): unable to create channel sync "+
  5723  			"message: %v", chanState.FundingOutpoint, err)
  5724  	} else {
  5725  		closeSummary.LastChanSyncMsg = chanSync
  5726  	}
  5727  
  5728  	anchorResolution, err := NewAnchorResolution(
  5729  		chanState, commitTxBroadcast,
  5730  	)
  5731  	if err != nil {
  5732  		return nil, err
  5733  	}
  5734  
  5735  	return &UnilateralCloseSummary{
  5736  		SpendDetail:         commitSpend,
  5737  		ChannelCloseSummary: closeSummary,
  5738  		CommitResolution:    commitResolution,
  5739  		HtlcResolutions:     htlcResolutions,
  5740  		RemoteCommit:        remoteCommit,
  5741  		AnchorResolution:    anchorResolution,
  5742  	}, nil
  5743  }
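
        // An illustrative sketch (not part of the original file): constructing
        // the summary once a spend of the funding output is detected. The
        // commit point would typically come from the revocation log or, after
        // data loss, from the peer's channel_reestablish; chanState, signer,
        // spendDetail and commitPoint are hypothetical values.
        //
        //	summary, err := NewUnilateralCloseSummary(
        //		chanState, signer, spendDetail,
        //		chanState.RemoteCommitment, commitPoint,
        //	)
        //	if err != nil {
        //		return err
        //	}
        //	// summary.CommitResolution and summary.HtlcResolutions then
        //	// drive the on-chain sweeping logic.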
  5744  
  5745  // IncomingHtlcResolution houses the information required to sweep any incoming
  5746  // HTLC's that we know the preimage to. We'll need to sweep an HTLC manually
  5747  // using this struct if we need to go on-chain for any reason, or if we detect
  5748  // that the remote party has broadcast their commitment transaction.
  5749  type IncomingHtlcResolution struct {
  5750  	// Preimage is the preimage that will be used to satisfy the contract of
  5751  	// the HTLC.
  5752  	//
  5753  	// NOTE: This field will only be populated in the incoming contest
  5754  	// resolver.
  5755  	Preimage [32]byte
  5756  
  5757  	// SignedSuccessTx is the fully signed HTLC success transaction. This
  5758  	// transaction (if non-nil) can be broadcast immediately. After a csv
  5759  	// delay (included below), the output created by this transaction
  5760  	// can be swept on-chain.
  5761  	//
  5762  	// NOTE: If this field is nil, then this indicates that we don't need
  5763  	// to go to the second level to claim this HTLC. Instead, it can be
  5764  	// claimed directly from the outpoint listed below.
  5765  	SignedSuccessTx *wire.MsgTx
  5766  
  5767  	// SignDetails is non-nil if SignedSuccessTx is non-nil, and the
  5768  	// channel is of the anchor type. As the above HTLC transaction will be
  5769  	// signed by the channel peer using SINGLE|ANYONECANPAY for such
  5770  	// channels, we can use the sign details to add the input-output pair
  5771  	// of the HTLC transaction to another transaction, thereby aggregating
  5772  	// multiple HTLC transactions together, and adding fees as needed.
  5773  	SignDetails *input.SignDetails
  5774  
  5775  	// CsvDelay is the relative time lock (expressed in blocks) that must
  5776  	// pass after the SignedSuccessTx is confirmed in the chain before the
  5777  	// output can be swept.
  5778  	//
  5779  	// NOTE: If SignedSuccessTx is nil, then this field denotes the CSV
  5780  	// delay needed to spend from the commitment transaction.
  5781  	CsvDelay uint32
  5782  
  5783  	// ClaimOutpoint is the final outpoint that needs to be spent in order
  5784  	// to fully sweep the HTLC. The input.SignDescriptor below should be used to
  5785  	// spend this outpoint. In the case of a second-level HTLC (non-nil
  5786  	// SignedSuccessTx), then we'll be spending a new transaction.
  5787  	// Otherwise, it'll be an output in the commitment transaction.
  5788  	ClaimOutpoint wire.OutPoint
  5789  
  5790  	// SweepSignDesc is a sign descriptor that has been populated with the
  5791  	// necessary items required to spend the sole output of the above
  5792  	// transaction.
  5793  	SweepSignDesc input.SignDescriptor
  5794  }
  5795  
  5796  // OutgoingHtlcResolution houses the information necessary to sweep any
  5797  // outgoing HTLC's after their contract has expired. This struct will be needed
  5798  // in one of two cases: the local party force closes the commitment transaction
  5799  // or the remote party unilaterally closes with their version of the commitment
  5800  // transaction.
  5801  type OutgoingHtlcResolution struct {
  5802  	// Expiry is the absolute timeout of the HTLC. This value is expressed
  5803  	// in block height, meaning after this height the HTLC can be swept.
  5804  	Expiry uint32
  5805  
  5806  	// SignedTimeoutTx is the fully signed HTLC timeout transaction. This
  5807  	// must be broadcast immediately after timeout has passed. Once this
  5808  	// has been confirmed, the HTLC output will transition into the
  5809  	// delay+claim state.
  5810  	//
  5811  	// NOTE: If this field is nil, then this indicates that we don't need
  5812  	// to go to the second level to claim this HTLC. Instead, it can be
  5813  	// claimed directly from the outpoint listed below.
  5814  	SignedTimeoutTx *wire.MsgTx
  5815  
  5816  	// SignDetails is non-nil if SignedTimeoutTx is non-nil, and the
  5817  	// channel is of the anchor type. As the above HTLC transaction will be
  5818  	// signed by the channel peer using SINGLE|ANYONECANPAY for such
  5819  	// channels, we can use the sign details to add the input-output pair
  5820  	// of the HTLC transaction to another transaction, thereby aggregating
  5821  	// multiple HTLC transactions together, and adding fees as needed.
  5822  	SignDetails *input.SignDetails
  5823  
  5824  	// CsvDelay is the relative time lock (expressed in blocks) that must
  5825  	// pass after the SignedTimeoutTx is confirmed in the chain before the
  5826  	// output can be swept.
  5827  	//
  5828  	// NOTE: If SignedTimeoutTx is nil, then this field denotes the CSV
  5829  	// delay needed to spend from the commitment transaction.
  5830  	CsvDelay uint32
  5831  
  5832  	// ClaimOutpoint is the final outpoint that needs to be spent in order
  5833  	// to fully sweep the HTLC. The input.SignDescriptor below should be used to
  5834  	// spend this outpoint. In the case of a second-level HTLC (non-nil
  5835  	// SignedTimeoutTx), then we'll be spending a new transaction.
  5836  	// Otherwise, it'll be an output in the commitment transaction.
  5837  	ClaimOutpoint wire.OutPoint
  5838  
  5839  	// SweepSignDesc is a sign descriptor that has been populated with the
  5840  	// necessary items required to spend the sole output of the above
  5841  	// transaction.
  5842  	SweepSignDesc input.SignDescriptor
  5843  }
  5844  
  5845  // HtlcResolutions contains the items necessary to sweep HTLC's on chain
  5846  // directly from a commitment transaction. We'll use this in case either party
  5847  // broadcasts a commitment transaction with live HTLC's.
  5848  type HtlcResolutions struct {
  5849  	// IncomingHTLCs contains a set of structs that can be used to sweep
  5850  	// all the incoming HTLC's that we know the preimage to.
  5851  	IncomingHTLCs []IncomingHtlcResolution
  5852  
  5853  	// OutgoingHTLCs contains a set of structs that contains all the info
  5854  	// needed to sweep an outgoing HTLC we've sent to the remote party
  5855  	// after an absolute delay has expired.
  5856  	OutgoingHTLCs []OutgoingHtlcResolution
  5857  }
  5858  
  5859  // newOutgoingHtlcResolution generates a new HTLC resolution capable of
  5860  // allowing the caller to sweep an outgoing HTLC present on either their, or
  5861  // the remote party's commitment transaction.
  5862  func newOutgoingHtlcResolution(signer input.Signer,
  5863  	localChanCfg *channeldb.ChannelConfig, commitTx *wire.MsgTx,
  5864  	htlc *channeldb.HTLC, keyRing *CommitmentKeyRing,
  5865  	feePerKB chainfee.AtomPerKByte, csvDelay, leaseExpiry uint32,
  5866  	localCommit, isCommitFromInitiator bool,
  5867  	chanType channeldb.ChannelType) (*OutgoingHtlcResolution, error) {
  5868  
  5869  	op := wire.OutPoint{
  5870  		Hash:  commitTx.TxHash(),
  5871  		Index: uint32(htlc.OutputIndex),
  5872  	}
  5873  
  5874  	// First, we'll re-generate the script used to send the HTLC to
  5875  	// the remote party within their commitment transaction.
  5876  	htlcScriptHash, htlcScript, err := genHtlcScript(
  5877  		chanType, false, localCommit, htlc.RefundTimeout, htlc.RHash,
  5878  		keyRing,
  5879  	)
  5880  	if err != nil {
  5881  		return nil, err
  5882  	}
  5883  
  5884  	// If we're spending this HTLC output from the remote node's
  5885  	// commitment, then we won't need to go to the second level as our
  5886  	// outputs don't have a CSV delay.
  5887  	if !localCommit {
  5888  		// With the script generated, we can completely populate the
  5889  		// input.SignDescriptor needed to sweep the output.
  5890  		return &OutgoingHtlcResolution{
  5891  			Expiry:        htlc.RefundTimeout,
  5892  			ClaimOutpoint: op,
  5893  			SweepSignDesc: input.SignDescriptor{
  5894  				KeyDesc:       localChanCfg.HtlcBasePoint,
  5895  				SingleTweak:   keyRing.LocalHtlcKeyTweak,
  5896  				WitnessScript: htlcScript,
  5897  				Output: &wire.TxOut{
  5898  					PkScript: htlcScriptHash,
  5899  					Value:    int64(htlc.Amt.ToAtoms()),
  5900  				},
  5901  				HashType: txscript.SigHashAll,
  5902  			},
  5903  			CsvDelay: HtlcSecondLevelInputSequence(chanType),
  5904  		}, nil
  5905  	}
  5906  
  5907  	// Otherwise, we'll need to craft a second level HTLC transaction, as
  5908  	// well as a sign desc to sweep after the CSV delay.
  5909  
  5910  	// In order to properly reconstruct the HTLC transaction, we'll need to
  5911  	// re-calculate the fee required at this state, so we can add the
  5912  	// correct output value amount to the transaction.
  5913  	htlcFee := HtlcTimeoutFee(chanType, feePerKB)
  5914  	secondLevelOutputAmt := htlc.Amt.ToAtoms() - htlcFee
  5915  
  5916  	// With the fee calculated, re-construct the second level timeout
  5917  	// transaction.
  5918  	timeoutTx, err := CreateHtlcTimeoutTx(
  5919  		chanType, isCommitFromInitiator, op, secondLevelOutputAmt,
  5920  		htlc.RefundTimeout, csvDelay, leaseExpiry, keyRing.RevocationKey,
  5921  		keyRing.ToLocalKey,
  5922  	)
  5923  	if err != nil {
  5924  		return nil, err
  5925  	}
  5926  
  5927  	// With the transaction created, we can generate a sign descriptor
  5928  	// that's capable of generating the signature required to spend the
  5929  	// HTLC output using the timeout transaction.
  5930  	txOut := commitTx.TxOut[htlc.OutputIndex]
  5931  	timeoutSignDesc := input.SignDescriptor{
  5932  		KeyDesc:       localChanCfg.HtlcBasePoint,
  5933  		SingleTweak:   keyRing.LocalHtlcKeyTweak,
  5934  		WitnessScript: htlcScript,
  5935  		Output:        txOut,
  5936  		HashType:      txscript.SigHashAll,
  5937  		InputIndex:    0,
  5938  	}
  5939  
  5940  	htlcSig, err := ecdsa.ParseDERSignature(htlc.Signature)
  5941  	if err != nil {
  5942  		return nil, err
  5943  	}
  5944  
  5945  	// With the sign desc created, we can now construct the full witness
  5946  	// for the timeout transaction, and populate it as well.
  5947  	sigHashType := HtlcSigHashType(chanType)
  5948  	timeoutWitness, err := input.SenderHtlcSpendTimeout(
  5949  		htlcSig, sigHashType, signer, &timeoutSignDesc, timeoutTx,
  5950  	)
  5951  	if err != nil {
  5952  		return nil, err
  5953  	}
  5954  	sigScript, err := input.WitnessStackToSigScript(timeoutWitness)
  5955  	if err != nil {
  5956  		return nil, err
  5957  	}
  5958  	timeoutTx.TxIn[0].SignatureScript = sigScript
  5959  
  5960  	// If this is an anchor type channel, the sign details will let us
  5961  	// re-sign an aggregated tx later.
  5962  	txSignDetails := HtlcSignDetails(
  5963  		chanType, timeoutSignDesc, sigHashType, htlcSig,
  5964  	)
  5965  
  5966  	// Finally, we'll generate the script output that the timeout
  5967  	// transaction creates so we can generate the signDesc required to
  5968  	// complete the claim process after a delay period.
  5969  	htlcSweepScript, err := SecondLevelHtlcScript(
  5970  		chanType, isCommitFromInitiator, keyRing.RevocationKey,
  5971  		keyRing.ToLocalKey, csvDelay, leaseExpiry,
  5972  	)
  5973  	if err != nil {
  5974  		return nil, err
  5975  	}
  5976  
  5977  	localDelayTweak := input.SingleTweakBytes(
  5978  		keyRing.CommitPoint, localChanCfg.DelayBasePoint.PubKey,
  5979  	)
  5980  	return &OutgoingHtlcResolution{
  5981  		Expiry:          htlc.RefundTimeout,
  5982  		SignedTimeoutTx: timeoutTx,
  5983  		SignDetails:     txSignDetails,
  5984  		CsvDelay:        csvDelay,
  5985  		ClaimOutpoint: wire.OutPoint{
  5986  			Hash:  timeoutTx.TxHash(),
  5987  			Index: 0,
  5988  		},
  5989  		SweepSignDesc: input.SignDescriptor{
  5990  			KeyDesc:       localChanCfg.DelayBasePoint,
  5991  			SingleTweak:   localDelayTweak,
  5992  			WitnessScript: htlcSweepScript.WitnessScript,
  5993  			Output: &wire.TxOut{
  5994  				PkScript: htlcSweepScript.PkScript,
  5995  				Value:    int64(secondLevelOutputAmt),
  5996  			},
  5997  			HashType: txscript.SigHashAll,
  5998  		},
  5999  	}, nil
  6000  }
  6001  
  6002  // newIncomingHtlcResolution creates a new HTLC resolution capable of allowing
  6003  // the caller to sweep an incoming HTLC. If the HTLC is on the caller's
  6004  // commitment transaction, then they'll need to broadcast a second-level
  6005  // transaction before sweeping the output (and incur a CSV delay). Otherwise,
  6006  // they can just sweep the output immediately with knowledge of the pre-image.
  6007  //
  6008  // TODO(roasbeef) consolidate code with above func
  6009  func newIncomingHtlcResolution(signer input.Signer,
  6010  	localChanCfg *channeldb.ChannelConfig, commitTx *wire.MsgTx,
  6011  	htlc *channeldb.HTLC, keyRing *CommitmentKeyRing,
  6012  	feePerKB chainfee.AtomPerKByte, csvDelay, leaseExpiry uint32,
  6013  	localCommit, isCommitFromInitiator bool, chanType channeldb.ChannelType) (
  6014  	*IncomingHtlcResolution, error) {
  6015  
  6016  	op := wire.OutPoint{
  6017  		Hash:  commitTx.TxHash(),
  6018  		Index: uint32(htlc.OutputIndex),
  6019  	}
  6020  
  6021  	// First, we'll re-generate the script the remote party used to
  6022  	// send the HTLC to us in their commitment transaction.
  6023  	htlcScriptHash, htlcScript, err := genHtlcScript(
  6024  		chanType, true, localCommit, htlc.RefundTimeout, htlc.RHash,
  6025  		keyRing,
  6026  	)
  6027  	if err != nil {
  6028  		return nil, err
  6029  	}
  6030  
  6031  	// If we're spending this output from the remote node's commitment,
  6032  	// then we can skip the second layer and spend the output directly.
  6033  	if !localCommit {
  6034  		// With the script generated, we can completely populate the
  6035  		// input.SignDescriptor needed to sweep the output.
  6036  		return &IncomingHtlcResolution{
  6037  			ClaimOutpoint: op,
  6038  			SweepSignDesc: input.SignDescriptor{
  6039  				KeyDesc:       localChanCfg.HtlcBasePoint,
  6040  				SingleTweak:   keyRing.LocalHtlcKeyTweak,
  6041  				WitnessScript: htlcScript,
  6042  				Output: &wire.TxOut{
  6043  					PkScript: htlcScriptHash,
  6044  					Value:    int64(htlc.Amt.ToAtoms()),
  6045  				},
  6046  				HashType: txscript.SigHashAll,
  6047  			},
  6048  			CsvDelay: HtlcSecondLevelInputSequence(chanType),
  6049  		}, nil
  6050  	}
  6051  
  6052  	// Otherwise, we'll need to go to the second level to sweep this HTLC.
  6053  
  6054  	// First, we'll reconstruct the original HTLC success transaction,
  6055  	// taking into account the fee rate used.
  6056  	htlcFee := HtlcSuccessFee(chanType, feePerKB)
  6057  	secondLevelOutputAmt := htlc.Amt.ToAtoms() - htlcFee
  6058  	successTx, err := CreateHtlcSuccessTx(
  6059  		chanType, isCommitFromInitiator, op, secondLevelOutputAmt,
  6060  		csvDelay, leaseExpiry, keyRing.RevocationKey, keyRing.ToLocalKey,
  6061  	)
  6062  	if err != nil {
  6063  		return nil, err
  6064  	}
  6065  
  6066  	// Once we've created the second-level transaction, we'll generate the
  6067  	// SignDesc needed to spend the HTLC output using the success transaction.
  6068  	txOut := commitTx.TxOut[htlc.OutputIndex]
  6069  	successSignDesc := input.SignDescriptor{
  6070  		KeyDesc:       localChanCfg.HtlcBasePoint,
  6071  		SingleTweak:   keyRing.LocalHtlcKeyTweak,
  6072  		WitnessScript: htlcScript,
  6073  		Output:        txOut,
  6074  		HashType:      txscript.SigHashAll,
  6075  		InputIndex:    0,
  6076  	}
  6077  
  6078  	htlcSig, err := ecdsa.ParseDERSignature(htlc.Signature)
  6079  	if err != nil {
  6080  		return nil, err
  6081  	}
  6082  
  6083  	// Next, we'll construct the full witness needed to satisfy the input of
  6084  	// the success transaction. Don't specify the preimage yet. The preimage
  6085  	// will be supplied by the contract resolver, either directly or when it
  6086  	// becomes known.
  6087  	sigHashType := HtlcSigHashType(chanType)
  6088  	successWitness, err := input.ReceiverHtlcSpendRedeem(
  6089  		htlcSig, sigHashType, nil, signer, &successSignDesc, successTx,
  6090  	)
  6091  	if err != nil {
  6092  		return nil, err
  6093  	}
  6094  	successTx.TxIn[0].SignatureScript, err = input.WitnessStackToSigScript(successWitness)
  6095  	if err != nil {
  6096  		return nil, err
  6097  	}
  6098  
  6099  	// If this is an anchor type channel, the sign details will let us
  6100  	// re-sign an aggregated tx later.
  6101  	txSignDetails := HtlcSignDetails(
  6102  		chanType, successSignDesc, sigHashType, htlcSig,
  6103  	)
  6104  
  6105  	// Finally, we'll generate the script that the second-level transaction
  6106  	// creates so we can generate the proper signDesc to sweep it after the
  6107  	// CSV delay has passed.
  6108  	htlcSweepScript, err := SecondLevelHtlcScript(
  6109  		chanType, isCommitFromInitiator, keyRing.RevocationKey,
  6110  		keyRing.ToLocalKey, csvDelay, leaseExpiry,
  6111  	)
  6112  	if err != nil {
  6113  		return nil, err
  6114  	}
  6115  
  6116  	localDelayTweak := input.SingleTweakBytes(
  6117  		keyRing.CommitPoint, localChanCfg.DelayBasePoint.PubKey,
  6118  	)
  6119  	return &IncomingHtlcResolution{
  6120  		SignedSuccessTx: successTx,
  6121  		SignDetails:     txSignDetails,
  6122  		CsvDelay:        csvDelay,
  6123  		ClaimOutpoint: wire.OutPoint{
  6124  			Hash:  successTx.TxHash(),
  6125  			Index: 0,
  6126  		},
  6127  		SweepSignDesc: input.SignDescriptor{
  6128  			KeyDesc:       localChanCfg.DelayBasePoint,
  6129  			SingleTweak:   localDelayTweak,
  6130  			WitnessScript: htlcSweepScript.WitnessScript,
  6131  			Output: &wire.TxOut{
  6132  				PkScript: htlcSweepScript.PkScript,
  6133  				Value:    int64(secondLevelOutputAmt),
  6134  			},
  6135  			HashType: txscript.SigHashAll,
  6136  		},
  6137  	}, nil
  6138  }
  6139  
  6140  // HtlcPoint returns the htlc's outpoint on the commitment tx.
  6141  func (r *IncomingHtlcResolution) HtlcPoint() wire.OutPoint {
  6142  	// If we have a success transaction, then the htlc's outpoint
  6143  	// is the transaction's only input. Otherwise, it's the claim
  6144  	// point.
  6145  	if r.SignedSuccessTx != nil {
  6146  		return r.SignedSuccessTx.TxIn[0].PreviousOutPoint
  6147  	}
  6148  
  6149  	return r.ClaimOutpoint
  6150  }
  6151  
  6152  // HtlcPoint returns the htlc's outpoint on the commitment tx.
  6153  func (r *OutgoingHtlcResolution) HtlcPoint() wire.OutPoint {
  6154  	// If we have a timeout transaction, then the htlc's outpoint
  6155  	// is the transaction's only input. Otherwise, it's the claim
  6156  	// point.
  6157  	if r.SignedTimeoutTx != nil {
  6158  		return r.SignedTimeoutTx.TxIn[0].PreviousOutPoint
  6159  	}
  6160  
  6161  	return r.ClaimOutpoint
  6162  }
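
        // An illustrative sketch (not part of the original file): HtlcPoint
        // yields the commitment outpoint to watch, whether or not a
        // second-level transaction is involved. htlcResolutions and watchSpend
        // are hypothetical names.
        //
        //	for _, ihr := range htlcResolutions.IncomingHTLCs {
        //		watchSpend(ihr.HtlcPoint())
        //	}
        //	for _, ohr := range htlcResolutions.OutgoingHTLCs {
        //		watchSpend(ohr.HtlcPoint())
        //	}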
  6163  
  6164  // extractHtlcResolutions creates a series of outgoing HTLC resolutions, and
  6165  // the local key used when generating the HTLC scripts. This function is to be
  6166  // used in two cases: force close, or a unilateral close.
  6167  func extractHtlcResolutions(feePerKB chainfee.AtomPerKByte, ourCommit bool,
  6168  	signer input.Signer, htlcs []channeldb.HTLC, keyRing *CommitmentKeyRing,
  6169  	localChanCfg, remoteChanCfg *channeldb.ChannelConfig,
  6170  	commitTx *wire.MsgTx, chanType channeldb.ChannelType,
  6171  	isCommitFromInitiator bool, leaseExpiry uint32) (*HtlcResolutions, error) {
  6172  
  6173  	// TODO(roasbeef): don't need to swap csv delay?
  6174  	dustLimit := remoteChanCfg.DustLimit
  6175  	csvDelay := remoteChanCfg.CsvDelay
  6176  	if ourCommit {
  6177  		dustLimit = localChanCfg.DustLimit
  6178  		csvDelay = localChanCfg.CsvDelay
  6179  	}
  6180  
  6181  	incomingResolutions := make([]IncomingHtlcResolution, 0, len(htlcs))
  6182  	outgoingResolutions := make([]OutgoingHtlcResolution, 0, len(htlcs))
  6183  	for _, htlc := range htlcs {
  6184  		htlc := htlc
  6185  
  6186  		// We'll skip any HTLC's which were dust on the commitment
  6187  		// transaction, as these don't have a corresponding output
  6188  		// within the commitment transaction.
  6189  		if HtlcIsDust(
  6190  			chanType, htlc.Incoming, ourCommit, feePerKB,
  6191  			htlc.Amt.ToAtoms(), dustLimit,
  6192  		) {
  6193  			continue
  6194  		}
  6195  
  6196  		// If the HTLC is incoming, then we'll attempt to see if we
  6197  		// know the pre-image to the HTLC.
  6198  		if htlc.Incoming {
  6199  			// We'll create an incoming HTLC resolution
  6200  			// as we can satisfy the contract.
  6201  			ihr, err := newIncomingHtlcResolution(
  6202  				signer, localChanCfg, commitTx, &htlc,
  6203  				keyRing, feePerKB, uint32(csvDelay), leaseExpiry,
  6204  				ourCommit, isCommitFromInitiator, chanType,
  6205  			)
  6206  			if err != nil {
  6207  				return nil, err
  6208  			}
  6209  
  6210  			incomingResolutions = append(incomingResolutions, *ihr)
  6211  			continue
  6212  		}
  6213  
  6214  		ohr, err := newOutgoingHtlcResolution(
  6215  			signer, localChanCfg, commitTx, &htlc, keyRing,
  6216  			feePerKB, uint32(csvDelay), leaseExpiry, ourCommit,
  6217  			isCommitFromInitiator, chanType,
  6218  		)
  6219  		if err != nil {
  6220  			return nil, err
  6221  		}
  6222  
  6223  		outgoingResolutions = append(outgoingResolutions, *ohr)
  6224  	}
  6225  
  6226  	return &HtlcResolutions{
  6227  		IncomingHTLCs: incomingResolutions,
  6228  		OutgoingHTLCs: outgoingResolutions,
  6229  	}, nil
  6230  }
  6231  
  6232  // AnchorResolution holds the information necessary to spend our commitment tx
  6233  // anchor.
  6234  type AnchorResolution struct {
  6235  	// AnchorSignDescriptor is the sign descriptor for our anchor.
  6236  	AnchorSignDescriptor input.SignDescriptor
  6237  
  6238  	// CommitAnchor is the anchor outpoint on the commit tx.
  6239  	CommitAnchor wire.OutPoint
  6240  
  6241  	// CommitFee is the fee of the commit tx.
  6242  	CommitFee dcrutil.Amount
  6243  
  6244  	// CommitSize is the size of the commit tx.
  6245  	CommitSize int64
  6246  }
  6247  
  6248  // LocalForceCloseSummary describes the final commitment state before the
  6249  // channel is locked-down to initiate a force closure by broadcasting the
  6250  // latest state on-chain. If we intend to broadcast this state, the
  6251  // channel should not be used after generating this close summary.  The summary
  6252  // includes all the information required to claim all rightfully owned outputs
  6253  // when the commitment gets confirmed.
  6254  type LocalForceCloseSummary struct {
  6255  	// ChanPoint is the outpoint that created the channel which has been
  6256  	// force closed.
  6257  	ChanPoint wire.OutPoint
  6258  
  6259  	// CloseTx is the transaction which can be used to close the channel
  6260  	// on-chain. When we initiate a force close, this will be our latest
  6261  	// commitment state.
  6262  	CloseTx *wire.MsgTx
  6263  
  6264  	// CommitResolution contains all the data required to sweep the output
  6265  	// to ourselves. Since this is our commitment transaction, we'll need
  6266  	// to wait a time delay before we can sweep the output.
  6267  	//
  6268  	// NOTE: If our commitment delivery output is below the dust limit,
  6269  	// then this will be nil.
  6270  	CommitResolution *CommitOutputResolution
  6271  
  6272  	// HtlcResolutions contains all the data required to sweep any outgoing
  6273  	// HTLC's and incoming HTLC's we know the preimage to. For each of these
  6274  	// HTLC's, we'll need to go to the second level to sweep them fully.
  6275  	HtlcResolutions *HtlcResolutions
  6276  
  6277  	// ChanSnapshot is a snapshot of the final state of the channel at the
  6278  	// time the summary was created.
  6279  	ChanSnapshot channeldb.ChannelSnapshot
  6280  
  6281  	// AnchorResolution contains the data required to sweep the anchor
  6282  	// output. If the channel type doesn't include anchors, the value of
  6283  	// this field will be nil.
  6284  	AnchorResolution *AnchorResolution
  6285  }
  6286  
  6287  // ForceClose executes a unilateral closure of the transaction at the current
  6288  // lowest commitment height of the channel. Following a force closure, all
  6289  // state transitions, or modifications to the state update logs will be
  6290  // rejected. Additionally, this function also returns a LocalForceCloseSummary
  6291  // which includes the necessary details required to sweep all the time-locked
  6292  // outputs within the commitment transaction.
  6293  //
  6294  // TODO(roasbeef): all methods need to abort if in dispute state
  6295  // TODO(roasbeef): method to generate CloseSummaries for when the remote peer
  6296  // does a unilateral close
  6297  func (lc *LightningChannel) ForceClose() (*LocalForceCloseSummary, error) {
  6298  	lc.Lock()
  6299  	defer lc.Unlock()
  6300  
  6301  	// If we've detected local data loss for this channel, then we won't
  6302  	// allow a force close, as it may be the case that we have a dated
  6303  	// version of the commitment, or this is actually a channel shell.
  6304  	if lc.channelState.HasChanStatus(channeldb.ChanStatusLocalDataLoss) {
  6305  		return nil, fmt.Errorf("cannot force close channel with "+
  6306  			"state: %v", lc.channelState.ChanStatus())
  6307  	}
  6308  
  6309  	commitTx, err := lc.getSignedCommitTx()
  6310  	if err != nil {
  6311  		return nil, err
  6312  	}
  6313  
  6314  	localCommitment := lc.channelState.LocalCommitment
  6315  	summary, err := NewLocalForceCloseSummary(
  6316  		lc.channelState, lc.Signer, commitTx,
  6317  		localCommitment.CommitHeight,
  6318  	)
  6319  	if err != nil {
  6320  		return nil, err
  6321  	}
  6322  
  6323  	// Set the channel state to indicate that the channel is now in a
  6324  	// contested state.
  6325  	lc.status = channelDispute
  6326  
  6327  	return summary, nil
  6328  }
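
        // An illustrative sketch (not part of the original file): a force close
        // ends with broadcasting the signed commitment and handing the
        // resolutions to the on-chain resolution machinery. broadcastTx and
        // resolveOutputs are hypothetical helpers.
        //
        //	summary, err := lc.ForceClose()
        //	if err != nil {
        //		return err
        //	}
        //	if err := broadcastTx(summary.CloseTx); err != nil {
        //		return err
        //	}
        //	resolveOutputs(summary.CommitResolution, summary.HtlcResolutions)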
  6329  
  6330  // NewLocalForceCloseSummary generates a LocalForceCloseSummary from the given
  6331  // channel state.  The passed commitTx must be a fully signed commitment
  6332  // transaction corresponding to localCommit.
  6333  func NewLocalForceCloseSummary(chanState *channeldb.OpenChannel,
  6334  	signer input.Signer, commitTx *wire.MsgTx, stateNum uint64) (
  6335  	*LocalForceCloseSummary, error) {
  6336  
  6337  	// Re-derive the original pkScript for the to-self output within the
  6338  	// commitment transaction. We'll need this to find the corresponding
  6339  	// output in the commitment transaction and potentially for creating
  6340  	// the sign descriptor.
  6341  	csvTimeout := uint32(chanState.LocalChanCfg.CsvDelay)
  6342  
  6343  	// We use the passed state num to derive our scripts, since in case
  6344  	// this is after recovery, our latest channels state might not be up to
  6345  	// date.
  6346  	revocation, err := chanState.RevocationProducer.AtIndex(stateNum)
  6347  	if err != nil {
  6348  		return nil, err
  6349  	}
  6350  	commitPoint := input.ComputeCommitmentPoint(revocation[:])
  6351  	keyRing := DeriveCommitmentKeys(
  6352  		commitPoint, true, chanState.ChanType,
  6353  		&chanState.LocalChanCfg, &chanState.RemoteChanCfg,
  6354  	)
  6355  
  6356  	var leaseExpiry uint32
  6357  	if chanState.ChanType.HasLeaseExpiration() {
  6358  		leaseExpiry = chanState.ThawHeight
  6359  	}
  6360  	toLocalScript, err := CommitScriptToSelf(
  6361  		chanState.ChanType, chanState.IsInitiator, keyRing.ToLocalKey,
  6362  		keyRing.RevocationKey, csvTimeout, leaseExpiry,
  6363  	)
  6364  	if err != nil {
  6365  		return nil, err
  6366  	}
  6367  
  6368  	// Locate the output index of the delayed commitment output back to us.
  6369  	// We'll return the details of this output to the caller so they can
  6370  	// sweep it once it's mature.
  6371  	var (
  6372  		delayIndex uint32
  6373  		delayOut   *wire.TxOut
  6374  	)
  6375  	for i, txOut := range commitTx.TxOut {
  6376  		if !bytes.Equal(toLocalScript.PkScript, txOut.PkScript) {
  6377  			continue
  6378  		}
  6379  
  6380  		delayIndex = uint32(i)
  6381  		delayOut = txOut
  6382  		break
  6383  	}
  6384  
  6385  	// With the necessary information gathered above, create a new sign
  6386  	// descriptor which is capable of generating the signature the caller
  6387  	// needs to sweep this output. The hash cache, and input index are not
  6388  	// set as the caller will decide these values once sweeping the output.
  6389  	// If the output is non-existent (dust), have the sign descriptor be
  6390  	// nil.
  6391  	var commitResolution *CommitOutputResolution
  6392  	if delayOut != nil {
  6393  		localBalance := delayOut.Value
  6394  		commitResolution = &CommitOutputResolution{
  6395  			SelfOutPoint: wire.OutPoint{
  6396  				Hash:  commitTx.TxHash(),
  6397  				Index: delayIndex,
  6398  			},
  6399  			SelfOutputSignDesc: input.SignDescriptor{
  6400  				KeyDesc:       chanState.LocalChanCfg.DelayBasePoint,
  6401  				SingleTweak:   keyRing.LocalCommitKeyTweak,
  6402  				WitnessScript: toLocalScript.WitnessScript,
  6403  				Output: &wire.TxOut{
  6404  					PkScript: delayOut.PkScript,
  6405  					Value:    localBalance,
  6406  				},
  6407  				HashType: txscript.SigHashAll,
  6408  			},
  6409  			MaturityDelay: csvTimeout,
  6410  		}
  6411  	}
  6412  
  6413  	// Once the delay output has been found (if it exists), then we'll also
  6414  	// need to create a series of sign descriptors for any lingering
  6415  	// outgoing HTLC's that we'll need to claim as well. If this is after
  6416  	// recovery there is not much we can do with HTLCs, so we'll always
  6417  	// use what we have in our latest state when extracting resolutions.
  6418  	localCommit := chanState.LocalCommitment
  6419  	htlcResolutions, err := extractHtlcResolutions(
  6420  		chainfee.AtomPerKByte(localCommit.FeePerKB), true, signer,
  6421  		localCommit.Htlcs, keyRing, &chanState.LocalChanCfg,
  6422  		&chanState.RemoteChanCfg, commitTx, chanState.ChanType,
  6423  		chanState.IsInitiator, leaseExpiry,
  6424  	)
  6425  	if err != nil {
  6426  		return nil, err
  6427  	}
  6428  
  6429  	anchorResolution, err := NewAnchorResolution(
  6430  		chanState, commitTx,
  6431  	)
  6432  	if err != nil {
  6433  		return nil, err
  6434  	}
  6435  
  6436  	return &LocalForceCloseSummary{
  6437  		ChanPoint:        chanState.FundingOutpoint,
  6438  		CloseTx:          commitTx,
  6439  		CommitResolution: commitResolution,
  6440  		HtlcResolutions:  htlcResolutions,
  6441  		ChanSnapshot:     *chanState.Snapshot(),
  6442  		AnchorResolution: anchorResolution,
  6443  	}, nil
  6444  }
  6445  
  6446  // CreateCloseProposal is used by both parties in a cooperative channel close
  6447  // workflow to generate proposed close transactions and signatures. This method
  6448  // should only be executed once all pending HTLCs (if any) on the channel have
  6449  // been cleared/removed. Upon completion, the source channel will shift into
  6450  // the "closing" state, which indicates that all incoming/outgoing HTLC
  6451  // requests should be rejected. A signature for the closing transaction is
  6452  // returned.
  6453  //
  6454  // TODO(roasbeef): caller should initiate signal to reject all incoming HTLCs,
  6455  // settle any in flight.
  6456  func (lc *LightningChannel) CreateCloseProposal(proposedFee dcrutil.Amount,
  6457  	localDeliveryScript []byte,
  6458  	remoteDeliveryScript []byte) (input.Signature, *chainhash.Hash,
  6459  	dcrutil.Amount, error) {
  6460  
  6461  	lc.Lock()
  6462  	defer lc.Unlock()
  6463  
  6464  	// If we've already closed the channel, then ignore this request.
  6465  	if lc.status == channelClosed {
  6466  		// TODO(roasbeef): check to ensure no pending payments
  6467  		return nil, nil, 0, ErrChanClosing
  6468  	}
  6469  
  6470  	// Get the final balances after subtracting the proposed fee, taking
  6471  	// care not to persist the adjusted balance, as the feeRate may change
  6472  	// during the channel closing process.
  6473  	ourBalance, theirBalance, err := CoopCloseBalance(
  6474  		lc.channelState.ChanType, lc.channelState.IsInitiator,
  6475  		proposedFee, lc.channelState.LocalCommitment,
  6476  	)
  6477  	if err != nil {
  6478  		return nil, nil, 0, err
  6479  	}
  6480  
  6481  	closeTx := CreateCooperativeCloseTx(
  6482  		fundingTxIn(lc.channelState), lc.channelState.LocalChanCfg.DustLimit,
  6483  		lc.channelState.RemoteChanCfg.DustLimit, ourBalance, theirBalance,
  6484  		localDeliveryScript, remoteDeliveryScript,
  6485  	)
  6486  
  6487  	// Ensure that the transaction doesn't explicitly violate any
  6488  	// consensus rules such as being too big, or having any output with a
  6489  	// negative value.
  6490  	if err := standalone.CheckTransactionSanity(closeTx, uint64(lc.netParams.MaxTxSize)); err != nil {
  6491  		return nil, nil, 0, fmt.Errorf("transaction not sane: %v", err)
  6492  	}
  6493  
  6494  	// Finally, sign the completed cooperative closure transaction. As the
  6495  	// initiator we'll simply send our signature over to the remote party,
  6496  	// using the generated txid to be notified once the closure transaction
  6497  	// has been confirmed.
  6498  	sig, err := lc.Signer.SignOutputRaw(closeTx, lc.signDesc)
  6499  	if err != nil {
  6500  		return nil, nil, 0, err
  6501  	}
  6502  
  6503  	// As everything checks out, indicate in the channel status that a
  6504  	// channel closure has been initiated.
  6505  	lc.status = channelClosing
  6506  
  6507  	closeTXID := closeTx.TxHash()
  6508  	return sig, &closeTXID, ourBalance, nil
  6509  }
  6510  
  6511  // CompleteCooperativeClose completes the cooperative closure of the target
  6512  // active lightning channel. A fully signed closure transaction as well as the
  6513  // signature itself are returned. Additionally, we also return our final
  6514  // settled balance, which reflects any fees we may have paid.
  6515  //
  6516  // NOTE: The passed local and remote sigs are expected to be fully complete
  6517  // signatures including the proper sighash byte.
  6518  func (lc *LightningChannel) CompleteCooperativeClose(
  6519  	localSig, remoteSig input.Signature,
  6520  	localDeliveryScript, remoteDeliveryScript []byte,
  6521  	proposedFee dcrutil.Amount) (*wire.MsgTx, dcrutil.Amount, error) {
  6522  
  6523  	lc.Lock()
  6524  	defer lc.Unlock()
  6525  
  6526  	// If the channel is already closed, then ignore this request.
  6527  	if lc.status == channelClosed {
  6528  		// TODO(roasbeef): check to ensure no pending payments
  6529  		return nil, 0, ErrChanClosing
  6530  	}
  6531  
  6532  	// Get the final balances after subtracting the proposed fee.
  6533  	ourBalance, theirBalance, err := CoopCloseBalance(
  6534  		lc.channelState.ChanType, lc.channelState.IsInitiator,
  6535  		proposedFee, lc.channelState.LocalCommitment,
  6536  	)
  6537  	if err != nil {
  6538  		return nil, 0, err
  6539  	}
  6540  
  6541  	// Create the transaction used to return the current settled balance
  6542  	// on this active channel back to both parties. In this current model,
  6543  	// the initiator pays full fees for the cooperative close transaction.
  6544  	closeTx := CreateCooperativeCloseTx(
  6545  		fundingTxIn(lc.channelState), lc.channelState.LocalChanCfg.DustLimit,
  6546  		lc.channelState.RemoteChanCfg.DustLimit, ourBalance, theirBalance,
  6547  		localDeliveryScript, remoteDeliveryScript,
  6548  	)
  6549  
  6550  	// Ensure that the transaction doesn't explicitly violate any
  6551  	// consensus rules such as being too big, or having any output with a
  6552  	// negative value.
  6553  	if err := standalone.CheckTransactionSanity(closeTx, uint64(lc.netParams.MaxTxSize)); err != nil {
  6554  		return nil, 0, err
  6555  	}
  6556  
  6557  	// Finally, construct the witness stack minding the order of the
  6558  	// pubkeys+sigs on the stack.
  6559  	ourKey := lc.channelState.LocalChanCfg.MultiSigKey.PubKey.
  6560  		SerializeCompressed()
  6561  	theirKey := lc.channelState.RemoteChanCfg.MultiSigKey.PubKey.
  6562  		SerializeCompressed()
  6563  	witness := input.SpendMultiSig(
  6564  		lc.signDesc.WitnessScript, ourKey, localSig, theirKey,
  6565  		remoteSig,
  6566  	)
  6567  	sigScript, err := input.WitnessStackToSigScript(witness)
  6568  	if err != nil {
  6569  		return nil, 0, err
  6570  	}
  6571  	closeTx.TxIn[0].SignatureScript = sigScript
  6572  
  6573  	// Validate the finalized transaction to ensure the output script is
  6574  	// properly met, and that the remote peer supplied a valid signature.
  6575  	prevOut := lc.signDesc.Output
  6576  	vm, err := txscript.NewEngine(prevOut.PkScript, closeTx, 0,
  6577  		input.ScriptVerifyFlags, prevOut.Version, nil)
  6578  	if err != nil {
  6579  		return nil, 0, err
  6580  	}
  6581  	if err := vm.Execute(); err != nil {
  6582  		return nil, 0, err
  6583  	}
  6584  
  6585  	// As the transaction is sane and the scripts are valid, we'll now mark
  6586  	// the channel as closed, as the closure transaction should get into the
  6587  	// chain in a timely manner and possibly be re-broadcast by the wallet.
  6588  	lc.status = channelClosed
  6589  
  6590  	return closeTx, ourBalance, nil
  6591  }
  6592  
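// exampleVerifyInput is an illustrative sketch (not part of the original
// source) of the validation pattern used in CompleteCooperativeClose: run the
// script engine over a single input to confirm that the assembled signature
// script satisfies the previous output's script.
func exampleVerifyInput(tx *wire.MsgTx, idx int, prevOut *wire.TxOut) error {
	vm, err := txscript.NewEngine(
		prevOut.PkScript, tx, idx, input.ScriptVerifyFlags,
		prevOut.Version, nil,
	)
	if err != nil {
		return err
	}
	return vm.Execute()
}
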
  6593  // AnchorResolutions is a set of anchor resolutions that's being used when
  6594  // sweeping anchors during local channel force close.
  6595  type AnchorResolutions struct {
  6596  	// Local is the anchor resolution for the local commitment tx.
  6597  	Local *AnchorResolution
  6598  
  6599  	// Remote is the anchor resolution for the remote commitment tx.
  6600  	Remote *AnchorResolution
  6601  
  6602  	// RemotePending is the anchor resolution for the remote pending
  6603  	// commitment tx. The value will be non-nil iff we've created a new
  6604  	// commitment tx for the remote party which they haven't ACKed yet.
  6605  	RemotePending *AnchorResolution
  6606  }
  6607  
  6608  // NewAnchorResolutions returns a set of anchor resolutions wrapped in the
  6609  // struct AnchorResolutions. Because we have no view on the mempool, we can
  6610  // only blindly anchor all of these txes down. Caller needs to check the
  6611  // returned values against nil to decide whether there exists an anchor
  6612  // resolution for local/remote/pending remote commitment txes.
  6613  func (lc *LightningChannel) NewAnchorResolutions() (*AnchorResolutions,
  6614  	error) {
  6615  
  6616  	lc.Lock()
  6617  	defer lc.Unlock()
  6618  
  6619  	resolutions := &AnchorResolutions{}
  6620  
  6621  	// Add anchor for local commitment tx, if any.
  6622  	localRes, err := NewAnchorResolution(
  6623  		lc.channelState, lc.channelState.LocalCommitment.CommitTx,
  6624  	)
  6625  	if err != nil {
  6626  		return nil, err
  6627  	}
  6628  	resolutions.Local = localRes
  6629  
  6630  	// Add anchor for remote commitment tx, if any.
  6631  	remoteRes, err := NewAnchorResolution(
  6632  		lc.channelState, lc.channelState.RemoteCommitment.CommitTx,
  6633  	)
  6634  	if err != nil {
  6635  		return nil, err
  6636  	}
  6637  	resolutions.Remote = remoteRes
  6638  
  6639  	// Add anchor for remote pending commitment tx, if any.
  6640  	remotePendingCommit, err := lc.channelState.RemoteCommitChainTip()
  6641  	if err != nil && err != channeldb.ErrNoPendingCommit {
  6642  		return nil, err
  6643  	}
  6644  
  6645  	if remotePendingCommit != nil {
  6646  		remotePendingRes, err := NewAnchorResolution(
  6647  			lc.channelState,
  6648  			remotePendingCommit.Commitment.CommitTx,
  6649  		)
  6650  		if err != nil {
  6651  			return nil, err
  6652  		}
  6653  		resolutions.RemotePending = remotePendingRes
  6654  	}
  6655  
  6656  	return resolutions, nil
  6657  }
  6658  
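// exampleCollectAnchors is an illustrative sketch (not part of the original
// source) of consuming NewAnchorResolutions: every entry must be checked
// against nil, since a channel without anchors, or a commitment without a
// local anchor output, yields a nil resolution.
func exampleCollectAnchors(lc *LightningChannel) ([]*AnchorResolution, error) {
	set, err := lc.NewAnchorResolutions()
	if err != nil {
		return nil, err
	}

	var anchors []*AnchorResolution
	for _, res := range []*AnchorResolution{
		set.Local, set.Remote, set.RemotePending,
	} {
		if res != nil {
			anchors = append(anchors, res)
		}
	}
	return anchors, nil
}
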
  6659  // NewAnchorResolution returns the information that is required to sweep the
  6660  // local anchor.
  6661  func NewAnchorResolution(chanState *channeldb.OpenChannel,
  6662  	commitTx *wire.MsgTx) (*AnchorResolution, error) {
  6663  
  6664  	// Return nil resolution if the channel has no anchors.
  6665  	if !chanState.ChanType.HasAnchors() {
  6666  		return nil, nil
  6667  	}
  6668  
  6669  	// Derive our local anchor script.
  6670  	localAnchor, _, err := CommitScriptAnchors(
  6671  		&chanState.LocalChanCfg, &chanState.RemoteChanCfg,
  6672  	)
  6673  	if err != nil {
  6674  		return nil, err
  6675  	}
  6676  
  6677  	// Look up the script on the commitment transaction. It may not be
  6678  	// present if there is no output paying to us.
  6679  	found, index := input.FindScriptOutputIndex(commitTx, localAnchor.PkScript)
  6680  	if !found {
  6681  		return nil, nil
  6682  	}
  6683  
  6684  	outPoint := &wire.OutPoint{
  6685  		Hash:  commitTx.TxHash(),
  6686  		Index: index,
  6687  	}
  6688  
  6689  	// Instantiate the sign descriptor that allows sweeping of the anchor.
  6690  	signDesc := &input.SignDescriptor{
  6691  		KeyDesc:       chanState.LocalChanCfg.MultiSigKey,
  6692  		WitnessScript: localAnchor.WitnessScript,
  6693  		Output: &wire.TxOut{
  6694  			PkScript: localAnchor.PkScript,
  6695  			Value:    int64(anchorSize),
  6696  		},
  6697  		HashType: txscript.SigHashAll,
  6698  	}
  6699  
  6700  	// Calculate the commit tx size. This commit tx doesn't yet include
  6701  	// the signature script spending the funding output, so we add the
  6702  	// (worst case) size for that too.
  6703  	size := int64(commitTx.SerializeSize()) + input.FundingOutputSigScriptSize
  6704  
  6705  	// Calculate commit tx fee.
  6706  	fee := chanState.Capacity
  6707  	for _, out := range commitTx.TxOut {
  6708  		fee -= dcrutil.Amount(out.Value)
  6709  	}
  6710  
  6711  	return &AnchorResolution{
  6712  		CommitAnchor:         *outPoint,
  6713  		AnchorSignDescriptor: *signDesc,
  6714  		CommitSize:           size,
  6715  		CommitFee:            fee,
  6716  	}, nil
  6717  }
  6718  
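// exampleCommitFee is an illustrative sketch (not part of the original
// source) of the fee calculation above: every atom of channel capacity that
// is not carried by a commitment output has been given up as fees, so the
// commitment fee is the capacity minus the sum of the outputs.
func exampleCommitFee(capacity dcrutil.Amount,
	commitTx *wire.MsgTx) dcrutil.Amount {

	fee := capacity
	for _, out := range commitTx.TxOut {
		fee -= dcrutil.Amount(out.Value)
	}
	return fee
}
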
  6719  // AvailableBalance returns the current balance available for sending within
  6720  // the channel. By available balance, we mean the balance we would have for
  6721  // adding an additional HTLC if, at this very instant, a new commitment were
  6722  // to be created that evaluates all the log entries. It takes into
  6723  // account the fee that must be paid for adding this HTLC (if we're the
  6724  // initiator), and that we cannot spend from the channel reserve. This method
  6725  // is useful when deciding if a given channel can accept an HTLC in the
  6726  // multi-hop forwarding scenario.
  6727  func (lc *LightningChannel) AvailableBalance() lnwire.MilliAtom {
  6728  	lc.RLock()
  6729  	defer lc.RUnlock()
  6730  
  6731  	bal, _ := lc.availableBalance()
  6732  	return bal
  6733  }
  6734  
  6735  // availableBalance is the private, non-mutexed version of AvailableBalance.
  6736  // It is provided so that methods already holding the lock can compute the
  6737  // available balance. Additionally, the total size of the next to be created
  6738  // commitment is returned for accounting purposes.
  6739  func (lc *LightningChannel) availableBalance() (lnwire.MilliAtom, int64) {
  6740  	// We'll grab the current set of log updates that the remote has
  6741  	// ACKed.
  6742  	remoteACKedIndex := lc.localCommitChain.tip().theirMessageIndex
  6743  	htlcView := lc.fetchHTLCView(remoteACKedIndex,
  6744  		lc.localUpdateLog.logIndex)
  6745  
  6746  	// Calculate our available balance from our local commitment.
  6747  	// TODO(halseth): could reuse parts validateCommitmentSanity to do this
  6748  	// balance calculation, as most of the logic is the same.
  6749  	//
  6750  	// NOTE: This is not always accurate, since the remote node can always
  6751  	// add updates concurrently, causing our balance to go down if we're
  6752  	// the initiator, but this is a problem on the protocol level.
  6753  	ourLocalCommitBalance, commitSize := lc.availableCommitmentBalance(
  6754  		htlcView, false,
  6755  	)
  6756  
  6757  	// Do the same calculation from the remote commitment point of view.
  6758  	ourRemoteCommitBalance, _ := lc.availableCommitmentBalance(
  6759  		htlcView, true,
  6760  	)
  6761  
  6762  	// Return whichever balance is lowest.
  6763  	if ourRemoteCommitBalance < ourLocalCommitBalance {
  6764  		return ourRemoteCommitBalance, commitSize
  6765  	}
  6766  
  6767  	return ourLocalCommitBalance, commitSize
  6768  }
  6769  
  6770  // availableCommitmentBalance attempts to calculate the balance we have
  6771  // available for HTLCs on the local/remote commitment given the htlcView. To
  6772  // account for sending HTLCs of different sizes, it will report the balance
  6773  // available for sending non-dust HTLCs, which will be manifested on the
  6774  // commitment, increasing the commitment fee we must pay as an initiator,
  6775  // eating into our balance. It will make sure we won't violate the channel
  6776  // reserve constraints for this amount.
  6777  func (lc *LightningChannel) availableCommitmentBalance(view *htlcView,
  6778  	remoteChain bool) (lnwire.MilliAtom, int64) {
  6779  
  6780  	// Compute the current balances for this commitment. This will take
  6781  	// into account HTLCs to determine the commit weight, which the
  6782  	// initiator must pay the fee for.
  6783  	ourBalance, theirBalance, commitSize, filteredView, err := lc.computeView(
  6784  		view, remoteChain, false,
  6785  	)
  6786  	if err != nil {
  6787  		lc.log.Errorf("Unable to fetch available balance: %v", err)
  6788  		return 0, 0
  6789  	}
  6790  
  6791  	// We can never spend from the channel reserve, so we'll subtract it
  6792  	// from our available balance.
  6793  	ourReserve := lnwire.NewMAtomsFromAtoms(
  6794  		lc.channelState.LocalChanCfg.ChanReserve,
  6795  	)
  6796  	if ourReserve <= ourBalance {
  6797  		ourBalance -= ourReserve
  6798  	} else {
  6799  		ourBalance = 0
  6800  	}
  6801  
  6802  	// Calculate the commitment fee in the case where we would add another
  6803  	// HTLC to the commitment, as only the balance remaining after this fee
  6804  	// has been paid is actually available for sending.
  6805  	feePerKB := filteredView.feePerKB
  6806  	htlcCommitFee := lnwire.NewMAtomsFromAtoms(
  6807  		feePerKB.FeeForSize(commitSize + input.HTLCOutputSize),
  6808  	)
  6809  
  6810  	// If we are the channel initiator, we must subtract this commitment
  6811  	// fee from our available balance in order to ensure we can afford both
  6812  	// the value of the HTLC and the additional commitment fee from adding
  6813  	// the HTLC.
  6814  	if lc.channelState.IsInitiator {
  6815  		// There is an edge case where our non-zero balance is lower
  6816  		// than the htlcCommitFee, where we could still be sending dust
  6817  		// HTLCs, but we return 0 in this case. This is to avoid
  6818  		// lowering our balance even further, as this takes us into a
  6819  		// bad state where neither we nor our channel counterparty can
  6820  		// add HTLCs.
  6821  		if ourBalance < htlcCommitFee {
  6822  			return 0, commitSize
  6823  		}
  6824  
  6825  		return ourBalance - htlcCommitFee, commitSize
  6826  	}
  6827  
  6828  	// If we're not the initiator, we must check whether the remote has
  6829  	// enough balance to pay for the fee of our HTLC. We'll start by also
  6830  	// subtracting our counterparty's reserve from their balance.
  6831  	theirReserve := lnwire.NewMAtomsFromAtoms(
  6832  		lc.channelState.RemoteChanCfg.ChanReserve,
  6833  	)
  6834  	if theirReserve <= theirBalance {
  6835  		theirBalance -= theirReserve
  6836  	} else {
  6837  		theirBalance = 0
  6838  	}
  6839  
  6840  	// We'll use the dustlimit and htlcFee to find the largest HTLC value
  6841  	// that will be considered dust on the commitment.
  6842  	dustlimit := lnwire.NewMAtomsFromAtoms(
  6843  		lc.channelState.LocalChanCfg.DustLimit,
  6844  	)
  6845  
  6846  	// For an extra HTLC fee to be paid on our commitment, the HTLC must be
  6847  	// large enough to make a non-dust HTLC timeout transaction.
  6848  	htlcFee := lnwire.NewMAtomsFromAtoms(
  6849  		HtlcTimeoutFee(lc.channelState.ChanType, feePerKB),
  6850  	)
  6851  
  6852  	// If we are looking at the remote commitment, we must use the remote
  6853  	// dust limit and the fee for adding an HTLC success transaction.
  6854  	if remoteChain {
  6855  		dustlimit = lnwire.NewMAtomsFromAtoms(
  6856  			lc.channelState.RemoteChanCfg.DustLimit,
  6857  		)
  6858  		htlcFee = lnwire.NewMAtomsFromAtoms(
  6859  			HtlcSuccessFee(lc.channelState.ChanType, feePerKB),
  6860  		)
  6861  	}
  6862  
  6863  	// The HTLC output will be manifested on the commitment if it
  6864  	// is non-dust after paying the HTLC fee.
  6865  	nonDustHtlcAmt := dustlimit + htlcFee
  6866  
  6867  	// If they cannot pay the fee should we add another non-dust HTLC, we'll
  6868  	// report our available balance just below the non-dust amount, to
  6869  	// avoid attempting HTLCs larger than this size.
  6870  	if theirBalance < htlcCommitFee && ourBalance >= nonDustHtlcAmt {
  6871  		ourBalance = nonDustHtlcAmt - 1
  6872  	}
  6873  
  6874  	return ourBalance, commitSize
  6875  }
  6876  
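// exampleInitiatorHeadroom is an illustrative sketch (not part of the
// original source) of the initiator-side logic above: after setting aside
// the channel reserve, the initiator can only offer what remains once the
// commitment fee for one additional HTLC has been paid.
func exampleInitiatorHeadroom(balance, reserve,
	htlcCommitFee lnwire.MilliAtom) lnwire.MilliAtom {

	// The reserve can never be spent.
	if reserve >= balance {
		return 0
	}
	balance -= reserve

	// Report zero rather than offering dust-sized HTLCs that would leave
	// the channel unable to afford any further additions.
	if balance < htlcCommitFee {
		return 0
	}
	return balance - htlcCommitFee
}
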
  6877  // StateSnapshot returns a snapshot of the current fully committed state within
  6878  // the channel.
  6879  func (lc *LightningChannel) StateSnapshot() *channeldb.ChannelSnapshot {
  6880  	lc.RLock()
  6881  	defer lc.RUnlock()
  6882  
  6883  	return lc.channelState.Snapshot()
  6884  }
  6885  
  6886  // validateFeeRate ensures that if the passed fee is applied to the channel,
  6887  // and a new commitment is created (which evaluates this fee), then the
  6888  // initiator of the channel does not dip below their reserve.
  6889  func (lc *LightningChannel) validateFeeRate(feePerKB chainfee.AtomPerKByte) error {
  6890  	// We'll ensure that we can accommodate this new fee change, yet still
  6891  	// be above our reserve balance. Otherwise, we'll reject the fee
  6892  	// update.
  6893  	availableBalance, txSize := lc.availableBalance()
  6894  
  6895  	oldFee := lnwire.NewMAtomsFromAtoms(
  6896  		lc.localCommitChain.tip().feePerKB.FeeForSize(txSize),
  6897  	)
  6898  
  6899  	// Our base balance is the total amount of atoms we can commit
  6900  	// towards fees before factoring in the channel reserve.
  6901  	baseBalance := availableBalance + oldFee
  6902  
  6903  	// Using the size of the commitment transaction if we were to create
  6904  	// a commitment now, we'll compute our remaining balance if we apply
  6905  	// this new fee update.
  6906  	newFee := lnwire.NewMAtomsFromAtoms(
  6907  		feePerKB.FeeForSize(txSize),
  6908  	)
  6909  
  6910  	// If the total fee exceeds our available balance (taking into account
  6911  	// the fee from the last state), then we'll reject this update as it
  6912  	// would mean we need to trim our entire output.
  6913  	if newFee > baseBalance {
  6914  		return fmt.Errorf("cannot apply fee_update=%v atom/kB, new fee "+
  6915  			"of %v is greater than balance of %v", int64(feePerKB),
  6916  			newFee, baseBalance)
  6917  	}
  6918  
  6919  	// TODO(halseth): should fail if fee update is unreasonable,
  6920  	// as specified in BOLT#2.
  6921  	//  * COMMENT(roasbeef): can cross-check with our ideal fee rate
  6922  
  6923  	return nil
  6924  }
  6925  
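// exampleFeeHeadroom is an illustrative sketch (not part of the original
// source) of the check performed by validateFeeRate: a proposed fee rate is
// acceptable only while the fee it implies on the current commitment size
// stays within the balance we can commit towards fees.
func exampleFeeHeadroom(available lnwire.MilliAtom, oldRate,
	newRate chainfee.AtomPerKByte, txSize int64) bool {

	oldFee := lnwire.NewMAtomsFromAtoms(oldRate.FeeForSize(txSize))
	newFee := lnwire.NewMAtomsFromAtoms(newRate.FeeForSize(txSize))

	// The balance usable for fees includes what the current state
	// already pays, since that fee is freed up before the new fee is
	// taken from our output.
	baseBalance := available + oldFee
	return newFee <= baseBalance
}
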
  6926  // UpdateFee initiates a fee update for this channel. Must only be called by
  6927  // the channel initiator, and must be called before sending update_fee to
  6928  // the remote.
  6929  func (lc *LightningChannel) UpdateFee(feePerKB chainfee.AtomPerKByte) error {
  6930  	lc.Lock()
  6931  	defer lc.Unlock()
  6932  
  6933  	// Only the initiator can send a fee update, so trying to send one as
  6934  	// the non-initiator will fail.
  6935  	if !lc.channelState.IsInitiator {
  6936  		return fmt.Errorf("local fee update as non-initiator")
  6937  	}
  6938  
  6939  	// Ensure that the passed fee rate meets our current requirements.
  6940  	if err := lc.validateFeeRate(feePerKB); err != nil {
  6941  		return err
  6942  	}
  6943  
  6944  	pd := &PaymentDescriptor{
  6945  		LogIndex:  lc.localUpdateLog.logIndex,
  6946  		Amount:    lnwire.NewMAtomsFromAtoms(dcrutil.Amount(feePerKB)),
  6947  		EntryType: FeeUpdate,
  6948  	}
  6949  
  6950  	lc.localUpdateLog.appendUpdate(pd)
  6951  
  6952  	return nil
  6953  }
  6954  
  6955  // ReceiveUpdateFee handles an updated fee sent from remote. This method will
  6956  // return an error if called as channel initiator.
  6957  func (lc *LightningChannel) ReceiveUpdateFee(feePerKB chainfee.AtomPerKByte) error {
  6958  	lc.Lock()
  6959  	defer lc.Unlock()
  6960  
  6961  	// Only the initiator can send a fee update, so we must fail if we
  6962  	// receive a fee update as the initiator.
  6963  	if lc.channelState.IsInitiator {
  6964  		return fmt.Errorf("received fee update as initiator")
  6965  	}
  6966  
  6967  	// TODO(roasbeef): or just modify to use the other balance?
  6968  	pd := &PaymentDescriptor{
  6969  		LogIndex:  lc.remoteUpdateLog.logIndex,
  6970  		Amount:    lnwire.NewMAtomsFromAtoms(dcrutil.Amount(feePerKB)),
  6971  		EntryType: FeeUpdate,
  6972  	}
  6973  
  6974  	lc.remoteUpdateLog.appendUpdate(pd)
  6975  	return nil
  6976  }
  6977  
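// exampleFeeUpdateFlow is an illustrative sketch (not part of the original
// source) of the update_fee exchange: the initiator validates and queues the
// update locally before sending it over the wire, while the responder applies
// the received rate to its remote update log. Wire handling is elided.
func exampleFeeUpdateFlow(initiator, responder *LightningChannel,
	newRate chainfee.AtomPerKByte) error {

	// Initiator side: fails if we aren't the initiator or if the new
	// rate would dip us below our reserve.
	if err := initiator.UpdateFee(newRate); err != nil {
		return err
	}

	// Responder side: fails if called as the initiator.
	return responder.ReceiveUpdateFee(newRate)
}
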
  6978  // generateRevocation generates the revocation message for a given height.
  6979  func (lc *LightningChannel) generateRevocation(height uint64) (*lnwire.RevokeAndAck,
  6980  	error) {
  6981  
  6982  	// Now that we've accepted a new state transition, we send the remote
  6983  	// party the revocation for our current commitment state.
  6984  	revocationMsg := &lnwire.RevokeAndAck{}
  6985  	commitSecret, err := lc.channelState.RevocationProducer.AtIndex(height)
  6986  	if err != nil {
  6987  		return nil, err
  6988  	}
  6989  	copy(revocationMsg.Revocation[:], commitSecret[:])
  6990  
  6991  	// Along with this revocation, we'll also send the _next_ commitment
  6992  	// point that the remote party should use to create our next commitment
  6993  	// transaction. We use a +2 here as we already gave them a look ahead
  6994  	// of size one after the FundingLocked message was sent:
  6995  	//
  6996  	// 0: current revocation, 1: their "next" revocation, 2: this revocation
  6997  	//
  6998  	// We're revoking the current revocation. Once they receive this
  6999  	// message they'll set the "current" revocation for us to their stored
  7000  	// "next" revocation, and this revocation will become their new "next"
  7001  	// revocation.
  7002  	//
  7003  	// Put simply, the window slides to the left by one.
  7004  	nextCommitSecret, err := lc.channelState.RevocationProducer.AtIndex(
  7005  		height + 2,
  7006  	)
  7007  	if err != nil {
  7008  		return nil, err
  7009  	}
  7010  
  7011  	revocationMsg.NextRevocationKey = input.ComputeCommitmentPoint(nextCommitSecret[:])
  7012  	revocationMsg.ChanID = lnwire.NewChanIDFromOutPoint(
  7013  		&lc.channelState.FundingOutpoint)
  7014  
  7015  	return revocationMsg, nil
  7016  }
  7017  
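// exampleRevocationWindow is an illustrative sketch (not part of the
// original source) of the index arithmetic in generateRevocation: revoking
// state h reveals the secret at index h, while the next commitment point is
// derived from the secret at h+2, preserving the one-state look-ahead the
// remote party was given at funding time.
func exampleRevocationWindow(producer shachain.Producer, height uint64) (
	*chainhash.Hash, *secp256k1.PublicKey, error) {

	// The secret being revealed for the state we are revoking.
	revealed, err := producer.AtIndex(height)
	if err != nil {
		return nil, nil, err
	}

	// The commitment point for the state after the remote party's
	// stored "next" state.
	nextSecret, err := producer.AtIndex(height + 2)
	if err != nil {
		return nil, nil, err
	}
	return revealed, input.ComputeCommitmentPoint(nextSecret[:]), nil
}
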
  7018  // CreateCooperativeCloseTx creates a transaction which, if signed by both
  7019  // parties and then broadcast, cooperatively closes an active channel. Each
  7020  // party's refund output is included only if its settled balance clears that
  7021  // party's dust limit, so a dust balance is simply given up as fees.
  7022  // Currently it is expected that the initiator pays the transaction fees for
  7023  // the closing transaction in full.
  7024  func CreateCooperativeCloseTx(fundingTxIn wire.TxIn,
  7025  	localDust, remoteDust, ourBalance, theirBalance dcrutil.Amount,
  7026  	ourDeliveryScript, theirDeliveryScript []byte) *wire.MsgTx {
  7027  
  7028  	// Construct the transaction to perform a cooperative closure of the
  7029  	// channel. In the event that one side doesn't have any settled funds
  7030  	// within the channel then a refund output for that particular side can
  7031  	// be omitted.
  7032  	closeTx := wire.NewMsgTx()
  7033  	closeTx.Version = 2
  7034  	closeTx.AddTxIn(&fundingTxIn)
  7035  
  7036  	// Create both cooperative closure outputs, properly respecting the
  7037  	// dust limits of both parties.
  7038  	if ourBalance >= localDust {
  7039  		closeTx.AddTxOut(&wire.TxOut{
  7040  			PkScript: ourDeliveryScript,
  7041  			Value:    int64(ourBalance),
  7042  		})
  7043  	}
  7044  	if theirBalance >= remoteDust {
  7045  		closeTx.AddTxOut(&wire.TxOut{
  7046  			PkScript: theirDeliveryScript,
  7047  			Value:    int64(theirBalance),
  7048  		})
  7049  	}
  7050  
  7051  	txsort.InPlaceSort(closeTx)
  7052  
  7053  	return closeTx
  7054  }
  7055  
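// exampleTrimmedCoopClose is an illustrative sketch (not part of the
// original source) showing how CreateCooperativeCloseTx omits a refund
// output whose balance falls below that party's dust limit. The concrete
// amounts are hypothetical.
func exampleTrimmedCoopClose(fundingTxIn wire.TxIn, ourScript,
	theirScript []byte) *wire.MsgTx {

	// With a 6030-atom dust limit on both sides, the remote's 5000-atom
	// balance is trimmed: the resulting transaction carries only our
	// output, and the trimmed value is given up as fees.
	return CreateCooperativeCloseTx(
		fundingTxIn, 6030, 6030, 100000, 5000,
		ourScript, theirScript,
	)
}
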
  7056  // CalcFee returns the commitment fee to use for the given
  7057  // fee rate (atoms-per-kB).
  7058  func (lc *LightningChannel) CalcFee(feeRate chainfee.AtomPerKByte) dcrutil.Amount {
  7059  	return feeRate.FeeForSize(CommitSize(lc.channelState.ChanType))
  7060  }
  7061  
  7062  // MaxFeeRate returns the maximum fee rate given an allocation of the channel
  7063  // initiator's spendable balance along with the local reserve amount. This can
  7064  // be useful to determine when we should stop proposing fee updates that exceed
  7065  // our maximum allocation.
  7066  //
  7067  // NOTE: This should only be used for channels in which the local node is
  7068  // the initiator.
  7069  func (lc *LightningChannel) MaxFeeRate(maxAllocation float64) chainfee.AtomPerKByte {
  7070  	lc.RLock()
  7071  	defer lc.RUnlock()
  7072  
  7073  	// The maximum fee depends on the available balance that can be
  7074  	// committed towards fees. It takes into account our local reserve
  7075  	// balance.
  7076  	availableBalance, size := lc.availableBalance()
  7077  
  7078  	oldFee := lc.localCommitChain.tip().feePerKB.FeeForSize(size)
  7079  
  7080  	// baseBalance is the maximum amount available for us to spend on fees.
  7081  	baseBalance := availableBalance.ToAtoms() + oldFee
  7082  
  7083  	maxFee := float64(baseBalance) * maxAllocation
  7084  
  7085  	// Ensure the fee rate doesn't dip below the fee floor.
  7086  	maxFeeRate := maxFee / (float64(size) / 1000)
  7087  	return chainfee.AtomPerKByte(
  7088  		math.Max(maxFeeRate, float64(chainfee.FeePerKBFloor)),
  7089  	)
  7090  }
  7091  
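// exampleCapFeeRate is an illustrative sketch (not part of the original
// source): a caller can use MaxFeeRate to cap a proposed fee rate so that,
// here, at most half of the spendable initiator balance goes to fees.
func exampleCapFeeRate(lc *LightningChannel,
	proposed chainfee.AtomPerKByte) chainfee.AtomPerKByte {

	maxRate := lc.MaxFeeRate(0.5)
	if proposed > maxRate {
		return maxRate
	}
	return proposed
}
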
  7092  // IdealCommitFeeRate uses the current network fee, the minimum relay fee,
  7093  // maximum fee allocation and anchor channel commitment fee rate to determine
  7094  // the ideal fee to be used for the commitments of the channel.
  7095  func (lc *LightningChannel) IdealCommitFeeRate(netFeeRate, minRelayFeeRate,
  7096  	maxAnchorCommitFeeRate chainfee.AtomPerKByte,
  7097  	maxFeeAlloc float64) chainfee.AtomPerKByte {
  7098  
  7099  	// Get the maximum fee rate that we can use given our max fee allocation
  7100  	// and given the local reserve balance that we must preserve.
  7101  	maxFeeRate := lc.MaxFeeRate(maxFeeAlloc)
  7102  
  7103  	var commitFeeRate chainfee.AtomPerKByte
  7104  
  7105  	// If the channel has anchor outputs then cap the fee rate at the
  7106  	// max anchor fee rate if that maximum is less than our max fee rate.
  7107  	// Otherwise, cap the fee rate at the max fee rate.
  7108  	switch lc.channelState.ChanType.HasAnchors() &&
  7109  		maxFeeRate > maxAnchorCommitFeeRate {
  7110  
  7111  	case true:
  7112  		commitFeeRate = chainfee.AtomPerKByte(
  7113  			math.Min(
  7114  				float64(netFeeRate),
  7115  				float64(maxAnchorCommitFeeRate),
  7116  			),
  7117  		)
  7118  
  7119  	case false:
  7120  		commitFeeRate = chainfee.AtomPerKByte(
  7121  			math.Min(float64(netFeeRate), float64(maxFeeRate)),
  7122  		)
  7123  	}
  7124  
  7125  	// The Decred network has a higher relay fee floor, so impose it here.
  7126  	if commitFeeRate < chainfee.FeePerKBFloor {
  7127  		commitFeeRate = chainfee.FeePerKBFloor
  7128  	}
  7129  
  7130  	if commitFeeRate >= minRelayFeeRate {
  7131  		return commitFeeRate
  7132  	}
  7133  
  7134  	// The commitment fee rate is below the minimum relay fee rate.
  7135  	// If the min relay fee rate is still below the maximum fee, then use
  7136  	// the minimum relay fee rate.
  7137  	if minRelayFeeRate <= maxFeeRate {
  7138  		return minRelayFeeRate
  7139  	}
  7140  
  7141  	// The minimum relay fee rate is more than the ideal maximum fee rate.
  7142  	// Check if it is smaller than the absolute maximum fee rate we can
  7143  	// use. If it is, then we use the minimum relay fee rate and we log a
  7144  	// warning to indicate that the max channel fee allocation option was
  7145  	// ignored.
  7146  	absoluteMaxFee := lc.MaxFeeRate(1)
  7147  	if minRelayFeeRate <= absoluteMaxFee {
  7148  		lc.log.Warn("Ignoring max channel fee allocation to " +
  7149  			"ensure that the commitment fee is above the " +
  7150  			"minimum relay fee.")
  7151  
  7152  		return minRelayFeeRate
  7153  	}
  7154  
  7155  	// The absolute maximum fee rate we can pay is below the minimum
  7156  	// relay fee rate. The commitment tx will not be able to propagate.
  7157  	// To give the transaction the best chance, we use the absolute
  7158  	// maximum fee we have available and we log an error.
  7159  	lc.log.Errorf("The commitment fee rate of %s is below the current "+
  7160  		"minimum relay fee rate of %s. The max fee rate of %s will be"+
  7161  		"used.", commitFeeRate, minRelayFeeRate, absoluteMaxFee)
  7162  
  7163  	return absoluteMaxFee
  7164  }
  7165  
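// exampleIdealFeeRate is an illustrative sketch (not part of the original
// source) of driving IdealCommitFeeRate. The anchor cap and the 0.5 fee
// allocation are hypothetical parameters.
func exampleIdealFeeRate(lc *LightningChannel, netRate,
	minRelayRate chainfee.AtomPerKByte) chainfee.AtomPerKByte {

	// Cap anchor-channel commitments at a hypothetical 20,000 atoms/kB.
	// The rate actually used is the lowest of the network rate, this cap
	// (for anchor channels), and the channel's own maximum, but it only
	// drops below the relay requirement when nothing else can propagate.
	const maxAnchorCommitFeeRate = chainfee.AtomPerKByte(20000)
	return lc.IdealCommitFeeRate(
		netRate, minRelayRate, maxAnchorCommitFeeRate, 0.5,
	)
}
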
  7166  // RemoteNextRevocation returns the channelState's RemoteNextRevocation.
  7167  func (lc *LightningChannel) RemoteNextRevocation() *secp256k1.PublicKey {
  7168  	lc.RLock()
  7169  	defer lc.RUnlock()
  7170  
  7171  	return lc.channelState.RemoteNextRevocation
  7172  }
  7173  
  7174  // IsInitiator returns true if we were the ones that initiated the funding
  7175  // workflow which led to the creation of this channel. Otherwise, it returns
  7176  // false.
  7177  func (lc *LightningChannel) IsInitiator() bool {
  7178  	lc.RLock()
  7179  	defer lc.RUnlock()
  7180  
  7181  	return lc.channelState.IsInitiator
  7182  }
  7183  
  7184  // CommitFeeRate returns the current fee rate of the commitment transaction in
  7185  // units of atom-per-kb.
  7186  func (lc *LightningChannel) CommitFeeRate() chainfee.AtomPerKByte {
  7187  	lc.RLock()
  7188  	defer lc.RUnlock()
  7189  
  7190  	return chainfee.AtomPerKByte(lc.channelState.LocalCommitment.FeePerKB)
  7191  }
  7192  
  7193  // IsPending returns true if the channel's funding transaction has not yet
  7194  // been fully confirmed, and false otherwise.
  7195  func (lc *LightningChannel) IsPending() bool {
  7196  	lc.RLock()
  7197  	defer lc.RUnlock()
  7198  
  7199  	return lc.channelState.IsPending
  7200  }
  7201  
  7202  // State provides access to the channel's internal state.
  7203  func (lc *LightningChannel) State() *channeldb.OpenChannel {
  7204  	return lc.channelState
  7205  }
  7206  
  7207  // MarkBorked marks the event when the channel has reached an irreconcilable
  7208  // state, such as a channel breach or state desynchronization. Borked channels
  7209  // should never be added to the switch.
  7210  func (lc *LightningChannel) MarkBorked() error {
  7211  	lc.Lock()
  7212  	defer lc.Unlock()
  7213  
  7214  	return lc.channelState.MarkBorked()
  7215  }
  7216  
  7217  // MarkCommitmentBroadcasted marks that a commitment transaction, either our
  7218  // own or the remote's, has been broadcast, and that we should watch the chain
  7219  // for it to confirm before taking any further action. It takes a boolean which
  7220  // indicates whether we initiated the close.
  7221  func (lc *LightningChannel) MarkCommitmentBroadcasted(tx *wire.MsgTx,
  7222  	locallyInitiated bool) error {
  7223  
  7224  	lc.Lock()
  7225  	defer lc.Unlock()
  7226  
  7227  	return lc.channelState.MarkCommitmentBroadcasted(tx, locallyInitiated)
  7228  }
  7229  
  7230  // MarkCoopBroadcasted marks that a cooperative close transaction has
  7231  // been broadcast, and that we should watch the chain for it to confirm before
  7232  // taking any further action. It takes a locally initiated bool which is true
  7233  // if we initiated the cooperative close.
  7234  func (lc *LightningChannel) MarkCoopBroadcasted(tx *wire.MsgTx,
  7235  	localInitiated bool) error {
  7236  
  7237  	lc.Lock()
  7238  	defer lc.Unlock()
  7239  
  7240  	return lc.channelState.MarkCoopBroadcasted(tx, localInitiated)
  7241  }
  7242  
  7243  // MarkDataLoss sets the channel status to LocalDataLoss and stores the
  7244  // passed commitPoint for use to retrieve funds in case the remote force closes
  7245  // the channel.
  7246  func (lc *LightningChannel) MarkDataLoss(commitPoint *secp256k1.PublicKey) error {
  7247  	lc.Lock()
  7248  	defer lc.Unlock()
  7249  
  7250  	return lc.channelState.MarkDataLoss(commitPoint)
  7251  }
  7252  
  7253  // ActiveHtlcs returns a slice of HTLC's which are currently active on *both*
  7254  // commitment transactions.
  7255  func (lc *LightningChannel) ActiveHtlcs() []channeldb.HTLC {
  7256  	lc.RLock()
  7257  	defer lc.RUnlock()
  7258  
  7259  	return lc.channelState.ActiveHtlcs()
  7260  }
  7261  
  7262  // LocalChanReserve returns our local ChanReserve requirement for the remote party.
  7263  func (lc *LightningChannel) LocalChanReserve() dcrutil.Amount {
  7264  	return lc.channelState.LocalChanCfg.ChanReserve
  7265  }
  7266  
  7267  // NextLocalHtlcIndex returns the next unallocated local htlc index. To ensure
  7268  // this always returns the next index that has not yet been allocated, this
  7269  // will first try to examine any pending commitments, before falling back to the
  7270  // last locked-in local commitment.
  7271  func (lc *LightningChannel) NextLocalHtlcIndex() (uint64, error) {
  7272  	lc.RLock()
  7273  	defer lc.RUnlock()
  7274  
  7275  	return lc.channelState.NextLocalHtlcIndex()
  7276  }
  7277  
  7278  // FwdMinHtlc returns the minimum HTLC value required by the remote node, i.e.
  7279  // the minimum value HTLC we can forward on this channel.
  7280  func (lc *LightningChannel) FwdMinHtlc() lnwire.MilliAtom {
  7281  	return lc.channelState.LocalChanCfg.MinHTLC
  7282  }
  7283  
  7284  // unsignedLocalUpdates retrieves the unsigned local updates that we should
  7285  // store upon receiving a revocation. This function is called from
  7286  // ReceiveRevocation. remoteMessageIndex is the height into the local update
  7287  // log that the remote commitment chain tip includes. localMessageIndex
  7288  // is the height into the local update log that the local commitment tail
  7289  // includes. Our local updates that are unsigned by the remote should
  7290  // have height greater than or equal to localMessageIndex (not on our commit),
  7291  // and height less than remoteMessageIndex (on the remote commit).
  7292  //
  7293  // NOTE: remoteMessageIndex is the height on the tip because this is called
  7294  // before the tail is advanced to the tip during ReceiveRevocation.
  7295  func (lc *LightningChannel) unsignedLocalUpdates(remoteMessageIndex,
  7296  	localMessageIndex uint64, chanID lnwire.ChannelID) []channeldb.LogUpdate {
  7297  
  7298  	var localPeerUpdates []channeldb.LogUpdate
  7299  	for e := lc.localUpdateLog.Front(); e != nil; e = e.Next() {
  7300  		pd := e.Value.(*PaymentDescriptor)
  7301  
  7302  		// We don't save add updates as they are restored from the
  7303  		// remote commitment in restoreStateLogs.
  7304  		if pd.EntryType == Add {
  7305  			continue
  7306  		}
  7307  
  7308  		// This is a settle/fail that is on the remote commitment, but
  7309  		// not on the local commitment. We expect this update to be
  7310  		// covered in the next commitment signature that the remote
  7311  		// sends.
  7312  		if pd.LogIndex < remoteMessageIndex && pd.LogIndex >= localMessageIndex {
  7313  			logUpdate := channeldb.LogUpdate{
  7314  				LogIndex: pd.LogIndex,
  7315  			}
  7316  
  7317  			switch pd.EntryType {
  7318  			case FeeUpdate:
  7319  				logUpdate.UpdateMsg = &lnwire.UpdateFee{
  7320  					ChanID:   chanID,
  7321  					FeePerKB: uint32(pd.Amount.ToAtoms()),
  7322  				}
  7323  			case Settle:
  7324  				logUpdate.UpdateMsg = &lnwire.UpdateFulfillHTLC{
  7325  					ChanID:          chanID,
  7326  					ID:              pd.ParentIndex,
  7327  					PaymentPreimage: pd.RPreimage,
  7328  				}
  7329  			case Fail:
  7330  				logUpdate.UpdateMsg = &lnwire.UpdateFailHTLC{
  7331  					ChanID: chanID,
  7332  					ID:     pd.ParentIndex,
  7333  					Reason: pd.FailReason,
  7334  				}
  7335  			case MalformedFail:
  7336  				logUpdate.UpdateMsg = &lnwire.UpdateFailMalformedHTLC{
  7337  					ChanID:       chanID,
  7338  					ID:           pd.ParentIndex,
  7339  					ShaOnionBlob: pd.ShaOnionBlob,
  7340  					FailureCode:  pd.FailCode,
  7341  				}
  7342  			}
  7343  
  7344  			localPeerUpdates = append(localPeerUpdates, logUpdate)
  7345  		}
  7346  	}
  7347  
  7348  	return localPeerUpdates
  7349  }
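
// exampleIsUnsignedLocalUpdate is an illustrative sketch (not part of the
// original source) of the window test above: an update is pending the remote
// party's signature when it is already on the remote commitment
// (LogIndex < remoteMessageIndex) but not yet on our own local commitment
// (LogIndex >= localMessageIndex).
func exampleIsUnsignedLocalUpdate(logIndex, remoteMessageIndex,
	localMessageIndex uint64) bool {

	return logIndex < remoteMessageIndex && logIndex >= localMessageIndex
}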