github.com/decred/dcrlnd@v0.7.6/htlcswitch/link.go

package htlcswitch

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	prand "math/rand"
	"sync"
	"sync/atomic"
	"time"

	"github.com/davecgh/go-spew/spew"
	"github.com/decred/dcrd/dcrutil/v4"
	"github.com/decred/dcrd/wire"
	"github.com/decred/slog"

	"github.com/decred/dcrlnd/build"
	"github.com/decred/dcrlnd/channeldb"
	"github.com/decred/dcrlnd/contractcourt"
	"github.com/decred/dcrlnd/htlcswitch/hodl"
	"github.com/decred/dcrlnd/htlcswitch/hop"
	"github.com/decred/dcrlnd/invoices"
	"github.com/decred/dcrlnd/lnpeer"
	"github.com/decred/dcrlnd/lntypes"
	"github.com/decred/dcrlnd/lnwallet"
	"github.com/decred/dcrlnd/lnwallet/chainfee"
	"github.com/decred/dcrlnd/lnwire"
	"github.com/decred/dcrlnd/queue"
	"github.com/decred/dcrlnd/ticker"
	"github.com/go-errors/errors"
)

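// init seeds the package-level math/rand source used to randomize the link's
// fee update timeout.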
func init() {
	prand.Seed(time.Now().UnixNano())
}

const (
	// DefaultMaxOutgoingCltvExpiry is the maximum outgoing time lock that
	// the node accepts for forwarded payments. The value is relative to the
	// current block height. The reason to have a maximum is to prevent
	// funds getting locked up unreasonably long. Otherwise, an attacker
	// willing to lock its own funds too, could force the funds of this node
	// to be locked up for an indefinite (max int32) number of blocks.
	//
	// The value 4032 corresponds to on average two weeks worth of blocks
	// and is based on the maximum number of hops (20), the default CLTV
	// delta (80), and some extra margin to account for the other lightning
	// implementations and past lnd versions which used to have a default
	// CLTV delta of 144.
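	// (At Decred's 5-minute target block time, two weeks is
	// 14 * 288 = 4032 blocks.)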
	DefaultMaxOutgoingCltvExpiry = 4032

	// DefaultMinLinkFeeUpdateTimeout represents the minimum interval in
	// which a link should propose to update its commitment fee rate.
	DefaultMinLinkFeeUpdateTimeout = 10 * time.Minute

	// DefaultMaxLinkFeeUpdateTimeout represents the maximum interval in
	// which a link should propose to update its commitment fee rate.
	DefaultMaxLinkFeeUpdateTimeout = 60 * time.Minute

	// DefaultMaxLinkFeeAllocation is the highest allocation we'll allow
	// a channel's commitment fee to be of its balance. This only applies to
	// the initiator of the channel.
	DefaultMaxLinkFeeAllocation float64 = 0.5
)

// ForwardingPolicy describes the set of constraints that a given ChannelLink
// is to adhere to when forwarding HTLC's. For each incoming HTLC, this set of
// constraints will be consulted in order to ensure that adequate fees are
// paid, and our time-lock parameters are respected. In the event that an
// incoming HTLC violates any of these constraints, it is to be _rejected_ with
// the error possibly carrying along a ChannelUpdate message that includes the
// latest policy.
type ForwardingPolicy struct {
	// MinHTLCOut is the smallest HTLC that is to be forwarded.
	MinHTLCOut lnwire.MilliAtom

	// MaxHTLC is the largest HTLC that is to be forwarded.
	MaxHTLC lnwire.MilliAtom

	// BaseFee is the base fee, expressed in milli-atoms, that must be paid
	// for each incoming HTLC. This field, combined with FeeRate, is used
	// to compute the required fee for a given HTLC.
	BaseFee lnwire.MilliAtom

	// FeeRate is the fee rate, expressed as milli-atoms charged per
	// million milli-atoms forwarded, that must be paid for each incoming
	// HTLC. This field combined with BaseFee is used to compute the
	// required fee for a given HTLC.
	FeeRate lnwire.MilliAtom

	// TimeLockDelta is the time-lock delta, expressed in blocks, that
	// will be subtracted from an incoming HTLC's timelock value to create
	// the time-lock value for the forwarded outgoing HTLC. The following
	// constraint MUST hold for an HTLC to be forwarded:
	//
	//  * incomingHtlc.timeLock - timeLockDelta = fwdInfo.OutgoingCTLV
	//
	//    where fwdInfo is the forwarding information extracted from the
	//    per-hop payload of the incoming HTLC's onion packet.
	TimeLockDelta uint32

	// TODO(roasbeef): add fee module inside of switch
}

// ExpectedFee computes the expected fee for a given htlc amount. The value
// returned from this function is to be used as a sanity check when forwarding
// HTLC's to ensure that an incoming HTLC properly adheres to our propagated
// forwarding policy.
//
// TODO(roasbeef): also add in current available channel bandwidth, inverse
// func
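//
// For example, with BaseFee = 1000 MAtoms and FeeRate = 100, an HTLC of
// 1,000,000 MAtoms carries an expected fee of
// 1000 + (1000000*100)/1000000 = 1100 MAtoms; FeeRate is applied as
// parts-per-million of the forwarded amount.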
func ExpectedFee(f ForwardingPolicy,
	htlcAmt lnwire.MilliAtom) lnwire.MilliAtom {

	return f.BaseFee + (htlcAmt*f.FeeRate)/1000000
}

// ChannelLinkConfig defines the configuration for the channel link. ALL
// elements within the configuration MUST be non-nil for the channel link to
// carry out its duties.
type ChannelLinkConfig struct {
	// FwrdingPolicy is the initial forwarding policy to be used when
	// deciding whether to forward incoming HTLC's or not. This value
	// can be updated with subsequent calls to UpdateForwardingPolicy
	// targeted at a given ChannelLink concrete interface implementation.
	FwrdingPolicy ForwardingPolicy

	// Circuits provides restricted access to the switch's circuit map,
	// allowing the link to open and close circuits.
	Circuits CircuitModifier

	// Switch provides a reference to the HTLC switch, we only use this in
	// testing to access circuit operations not typically exposed by the
	// CircuitModifier.
	//
	// TODO(conner): remove after refactoring htlcswitch testing framework.
	Switch *Switch

	// BestHeight returns the best known height.
	BestHeight func() uint32

	// ResetChanReestablishWaitTime zeroes the time it took to reestablish
	// the channel after restart.
	ResetChanReestablishWaitTime func(chanID lnwire.ShortChannelID) error

	// AddToChanReestablishWaitTime adds to the total time tracked to have
	// taken a channel to be reestablished.
	AddToChanReestablishWaitTime func(chanID lnwire.ShortChannelID, waitTime time.Duration) error

	// ForwardPackets attempts to forward the batch of htlcs through the
	// switch. The function returns an error if it fails to send one or
	// more packets. The link's quit signal should be provided to allow
	// cancellation of forwarding during link shutdown.
	ForwardPackets func(chan struct{}, ...*htlcPacket) error

	// DecodeHopIterators facilitates batched decoding of HTLC Sphinx onion
	// blobs, which are then used to inform how to forward an HTLC.
	//
	// NOTE: This function assumes the same set of readers and preimages
	// are always presented for the same identifier.
	DecodeHopIterators func([]byte, []hop.DecodeHopIteratorRequest) (
		[]hop.DecodeHopIteratorResponse, error)

	// ExtractErrorEncrypter is the function responsible for decoding the
	// HTLC Sphinx onion blob, and creating the onion failure obfuscator.
	ExtractErrorEncrypter hop.ErrorEncrypterExtracter

	// FetchLastChannelUpdate retrieves the latest routing policy for a
	// target channel. This channel will typically be the outgoing channel
	// specified when we receive an incoming HTLC. This will be used to
	// provide payment senders our latest policy when sending encrypted
	// error messages.
	FetchLastChannelUpdate func(lnwire.ShortChannelID) (*lnwire.ChannelUpdate, error)

	// Peer is a lightning network node with which we have the channel link
	// opened.
	Peer lnpeer.Peer

	// Registry is a sub-system which is responsible for managing the
	// invoices in a thread-safe manner.
	Registry InvoiceDatabase

	// PreimageCache is a global witness beacon that houses any new
	// preimages discovered by other links. We'll use this to add new
	// witnesses that we discover which will notify any sub-systems
	// subscribed to new events.
	PreimageCache contractcourt.WitnessBeacon

	// OnChannelFailure is a function closure that we'll call if the
	// channel failed for some reason. Depending on the severity of the
	// error, the closure potentially must force close this channel and
	// disconnect the peer.
	//
	// NOTE: The method must return in order for the ChannelLink to be able
	// to shut down properly.
	OnChannelFailure func(lnwire.ChannelID, lnwire.ShortChannelID,
		LinkFailureError)

	// UpdateContractSignals is a function closure that we'll use to update
	// outside sub-systems with the latest signals for our inner Lightning
	// channel. These signals will notify the caller when the channel has
	// been closed, or when the set of active HTLC's is updated.
	UpdateContractSignals func(*contractcourt.ContractSignals) error

	// ChainEvents is an active subscription to the chain watcher for this
	// channel to be notified of any on-chain activity related to this
	// channel.
	ChainEvents *contractcourt.ChainEventSubscription

	// FeeEstimator is an instance of a live fee estimator which will be
	// used to dynamically regulate the current fee of the commitment
	// transaction to ensure timely confirmation.
	FeeEstimator chainfee.Estimator

	// HodlMask is a bit vector composed of hodl.Flags, specifying
	// breakpoints for HTLC forwarding internal to the switch.
	//
	// NOTE: This should only be used for testing.
	HodlMask hodl.Mask

	// SyncStates is used to indicate that we need to send the channel
	// reestablishment message to the remote peer. It should be done if our
	// clients have been restarted, or the remote peer has reconnected.
	SyncStates bool

	// BatchTicker is the ticker that determines the interval that we'll
	// use to check the batch to see if there are any updates we should
	// flush out. By batching updates into a single commit, we attempt to
	// increase throughput by maximizing the number of updates coalesced
	// into a single commit.
	BatchTicker ticker.Ticker

	// FwdPkgGCTicker is the ticker determining the frequency at which
	// garbage collection of forwarding packages occurs. We use a
	// time-based approach, as opposed to block epochs, as to not hinder
	// syncing.
	FwdPkgGCTicker ticker.Ticker

	// PendingCommitTicker is a ticker that allows the link to determine if
	// a locally initiated commitment dance gets stuck waiting for the
	// remote party to revoke.
	PendingCommitTicker ticker.Ticker

	// BatchSize is the max size of a batch of updates done to the link
	// before we do a state update.
	BatchSize uint32

	// UnsafeReplay will cause a link to replay the adds in its latest
	// commitment txn after the link is restarted. This should only be used
	// in testing, it is here to ensure the sphinx replay detection on the
	// receiving node is persistent.
	UnsafeReplay bool

	// MinFeeUpdateTimeout represents the minimum interval in which a link
	// will propose to update its commitment fee rate. A random timeout will
	// be selected between this and MaxFeeUpdateTimeout.
	MinFeeUpdateTimeout time.Duration

	// MaxFeeUpdateTimeout represents the maximum interval in which a link
	// will propose to update its commitment fee rate. A random timeout will
	// be selected between this and MinFeeUpdateTimeout.
	MaxFeeUpdateTimeout time.Duration

	// OutgoingCltvRejectDelta defines the number of blocks before expiry of
	// an htlc where we no longer offer an htlc. This should be at least
	// the outgoing broadcast delta, because in any case we don't want to
	// risk offering an htlc that triggers channel closure.
	OutgoingCltvRejectDelta uint32

	// TowerClient is an optional engine that manages the signing,
	// encrypting, and uploading of justice transactions to the daemon's
	// configured set of watchtowers for legacy channels.
	TowerClient TowerClient

	// MaxOutgoingCltvExpiry is the maximum outgoing timelock that the link
	// should accept for a forwarded HTLC. The value is relative to the
	// current block height.
	MaxOutgoingCltvExpiry uint32

	// MaxFeeAllocation is the highest allocation we'll allow a channel's
	// commitment fee to be of its balance. This only applies to the
	// initiator of the channel.
	MaxFeeAllocation float64

	// MaxAnchorsCommitFeeRate is the max commitment fee rate we'll use as
	// the initiator for channels of the anchor type.
	MaxAnchorsCommitFeeRate chainfee.AtomPerKByte

	// NotifyActiveLink allows the link to tell the ChannelNotifier when a
	// link is first started.
	NotifyActiveLink func(wire.OutPoint)

	// NotifyActiveChannel allows the link to tell the ChannelNotifier when
	// a channel becomes active.
	NotifyActiveChannel func(wire.OutPoint)

	// NotifyInactiveChannel allows the switch to tell the ChannelNotifier
	// when channels become inactive.
	NotifyInactiveChannel func(wire.OutPoint)

	// HtlcNotifier is an instance of a htlcNotifier which we will pipe htlc
	// events through.
	HtlcNotifier htlcNotifier
}

// localUpdateAddMsg contains a locally initiated htlc and a channel that will
// receive the outcome of the link processing. This channel must be buffered to
// prevent the link from blocking.
type localUpdateAddMsg struct {
	pkt *htlcPacket
	err chan error
}

// shutdownReq contains an error channel that will be used by the channelLink
// to send an error if shutdown failed. If shutdown succeeded, the channel will
// be closed.
type shutdownReq struct {
	err chan error
}

// channelLink is the service which drives a channel's commitment update
// state-machine. In the event that an HTLC needs to be propagated to another
// link, the forward handler from the config is used, which sends the HTLC to
// the switch. Additionally, the link encapsulates the logic of commitment
// protocol message ordering and updates.
type channelLink struct {
	// The following fields are only meant to be used *atomically*
	started       int32
	reestablished int32
	shutdown      int32

	// failed should be set to true in case a link error happens, making
	// sure we don't process any more updates.
	failed bool

	// keystoneBatch represents a volatile list of keystones that must be
	// written before attempting to sign the next commitment txn. These
	// represent all the HTLC's forwarded to the link from the switch. Once
	// we lock them into our outgoing commitment, then the circuit has a
	// keystone, and is fully opened.
	keystoneBatch []Keystone

	// openedCircuits is the set of all payment circuits that will be open
	// once we make our next commitment. After making the commitment we'll
	// ACK all these from our mailbox to ensure that they don't get
	// re-delivered if we reconnect.
	openedCircuits []CircuitKey

	// closedCircuits is the set of all payment circuits that will be
	// closed once we make our next commitment. After taking the commitment
	// we'll ACK all these to ensure that they don't get re-delivered if we
	// reconnect.
	closedCircuits []CircuitKey

	// channel is a lightning network channel to which we apply htlc
	// updates.
	channel *lnwallet.LightningChannel

	// shortChanID is the most up to date short channel ID for the link.
	shortChanID lnwire.ShortChannelID

	// cfg is a structure which carries all dependent fields/handlers
	// which may affect behaviour of the service.
	cfg ChannelLinkConfig

	// mailBox is the main interface between the outside world and the
	// link. All incoming messages will be sent over this mailBox. Messages
	// include new updates from our connected peer, and new packets sent by
	// the switch to be forwarded.
	mailBox MailBox

	// upstream is a channel that new messages sent from the remote peer to
	// the local peer will be sent across.
	upstream chan lnwire.Message

	// downstream is a channel in which new multi-hop HTLC's to be
	// forwarded will be sent across. Messages from this channel are sent
	// by the HTLC switch.
	downstream chan *htlcPacket

	// localUpdateAdd is a channel across which locally initiated HTLCs
	// are sent.
	localUpdateAdd chan *localUpdateAddMsg

	// htlcUpdates is a channel that we'll use to update outside
	// sub-systems with the latest set of active HTLC's on our channel.
	htlcUpdates chan *contractcourt.ContractUpdate

	// shutdownRequest is a channel that the channelLink will listen on to
	// service shutdown requests from ShutdownIfChannelClean calls.
	shutdownRequest chan *shutdownReq

	// updateFeeTimer is the timer responsible for updating the link's
	// commitment fee every time it fires.
	updateFeeTimer *time.Timer

	// uncommittedPreimages stores a list of all preimages that have been
	// learned since receiving the last CommitSig from the remote peer. The
	// batch will be flushed just before accepting the subsequent CommitSig
	// or on shutdown to avoid doing a write for each preimage received.
	uncommittedPreimages []lntypes.Preimage

	sync.RWMutex

	// hodlQueue is used to receive exit hop htlc resolutions from the
	// invoice registry.
	hodlQueue *queue.ConcurrentQueue

	// hodlMap stores related htlc data for a circuit key. It allows
	// resolving those htlcs when we receive a message on hodlQueue.
	hodlMap map[channeldb.CircuitKey]hodlHtlc

	// log is a link-specific logging instance.
	log slog.Logger

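	// wg tracks the link's helper goroutines, while quit is closed to
	// signal them to exit.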
	wg   sync.WaitGroup
	quit chan struct{}
}

// hodlHtlc contains htlc data that is required for resolution.
type hodlHtlc struct {
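	// pd is the payment descriptor of the accepted htlc awaiting
	// resolution.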
	pd         *lnwallet.PaymentDescriptor
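
	// obfuscator is the error encrypter used to wrap any failure
	// messages sent back to the htlc's sender.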
	obfuscator hop.ErrorEncrypter
}

// NewChannelLink creates a new instance of a ChannelLink given a configuration
// and active channel that will be used to verify/apply updates to.
func NewChannelLink(cfg ChannelLinkConfig,
	channel *lnwallet.LightningChannel) ChannelLink {

	logPrefix := fmt.Sprintf("ChannelLink(%v):", channel.ChannelPoint())

	return &channelLink{
		cfg:         cfg,
		channel:     channel,
		shortChanID: channel.ShortChanID(),
		// TODO(roasbeef): just do reserve here?
		htlcUpdates:     make(chan *contractcourt.ContractUpdate),
		shutdownRequest: make(chan *shutdownReq),
		hodlMap:         make(map[channeldb.CircuitKey]hodlHtlc),
		hodlQueue:       queue.NewConcurrentQueue(10),
		log:             build.NewPrefixLog(logPrefix, log),
		quit:            make(chan struct{}),
		localUpdateAdd:  make(chan *localUpdateAddMsg),
	}
}

// A compile time check to ensure channelLink implements the ChannelLink
// interface.
var _ ChannelLink = (*channelLink)(nil)

// Start starts all helper goroutines required for the operation of the channel
// link.
//
// NOTE: Part of the ChannelLink interface.
func (l *channelLink) Start() error {
	if !atomic.CompareAndSwapInt32(&l.started, 0, 1) {
		err := errors.Errorf("channel link(%v): already started", l)
		l.log.Warn("already started")
		return err
	}

	l.log.Info("starting")

	// If the config supplied a watchtower client, ensure the channel is
	// registered before trying to use it during operation.
	if l.cfg.TowerClient != nil {
		err := l.cfg.TowerClient.RegisterChannel(l.ChanID())
		if err != nil {
			return err
		}
	}

	l.mailBox.ResetMessages()
	l.hodlQueue.Start()

	// Before launching the htlcManager goroutine, revert any circuits that
	// were marked open in the switch's circuit map, but did not make it
	// into a commitment txn. We use the next local htlc index as the cut
	// off point, since all indexes below that are committed. This action
	// is only performed if the link's final short channel ID has been
	// assigned, otherwise we would try to trim the htlcs belonging to the
	// all-zero, hop.Source ID.
	if l.ShortChanID() != hop.Source {
		localHtlcIndex, err := l.channel.NextLocalHtlcIndex()
		if err != nil {
			return fmt.Errorf("unable to retrieve next local "+
				"htlc index: %v", err)
		}

		// NOTE: This is automatically done by the switch when it
		// starts up, but is necessary to prevent inconsistencies in
		// the case that the link flaps. This is a result of a link's
		// life-cycle being shorter than that of the switch.
		chanID := l.ShortChanID()
		err = l.cfg.Circuits.TrimOpenCircuits(chanID, localHtlcIndex)
		if err != nil {
			return fmt.Errorf("unable to trim circuits above "+
				"local htlc index %d: %v", localHtlcIndex, err)
		}

		// Since the link is live, before we start the link we'll update
		// the ChainArbitrator with the set of new channel signals for
		// this channel.
		//
		// TODO(roasbeef): split goroutines within channel arb to avoid
		go func() {
			signals := &contractcourt.ContractSignals{
				HtlcUpdates: l.htlcUpdates,
				ShortChanID: l.channel.ShortChanID(),
			}

			err := l.cfg.UpdateContractSignals(signals)
			if err != nil {
				l.log.Errorf("unable to update signals")
			}
		}()
	}

	l.updateFeeTimer = time.NewTimer(l.randomFeeUpdateTimeout())

	l.wg.Add(1)
	go l.htlcManager()

	return nil
}

// Stop gracefully stops all active helper goroutines, then waits until they've
// exited.
//
// NOTE: Part of the ChannelLink interface.
func (l *channelLink) Stop() {
	if !atomic.CompareAndSwapInt32(&l.shutdown, 0, 1) {
		l.log.Warn("already stopped")
		return
	}

	l.log.Info("stopping")

	// As the link is stopping, we are no longer interested in htlc
	// resolutions coming from the invoice registry.
	l.cfg.Registry.HodlUnsubscribeAll(l.hodlQueue.ChanIn())

	if l.cfg.ChainEvents.Cancel != nil {
		l.cfg.ChainEvents.Cancel()
	}

	// Ensure the channel for the timer is drained.
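	// If Stop reports false, the timer has already fired, so remove any
	// value left in the channel with a non-blocking receive.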
	if !l.updateFeeTimer.Stop() {
		select {
		case <-l.updateFeeTimer.C:
		default:
		}
	}

	l.hodlQueue.Stop()

	close(l.quit)
	l.wg.Wait()

	// Now that the htlcManager has completely exited, reset the packet
	// courier. This allows the mailbox to reevaluate any lingering Adds
	// that were delivered but didn't make it onto a commitment to be
	// failed back if the link is offline for an extended period of time.
	// The error is ignored since it can only fail when the daemon is
	// exiting.
	_ = l.mailBox.ResetPackets()

	// As a final precaution, we will attempt to flush any uncommitted
	// preimages to the preimage cache. The preimages should be re-delivered
	// after channel reestablishment, however this adds an extra layer of
	// protection in case the peer never returns. Without this, we will be
	// unable to settle any contracts depending on the preimages even though
	// we had learned them at some point.
	err := l.cfg.PreimageCache.AddPreimages(l.uncommittedPreimages...)
	if err != nil {
		l.log.Errorf("unable to add preimages=%v to cache: %v",
			l.uncommittedPreimages, err)
	}
}

// WaitForShutdown blocks until the link finishes shutting down, which includes
// termination of all dependent goroutines.
func (l *channelLink) WaitForShutdown() {
	l.wg.Wait()
}

// EligibleToForward returns a bool indicating if the channel is able to
// actively accept requests to forward HTLC's. We're able to forward HTLC's if
// we know the remote party's next revocation point. Otherwise, we can't
// initiate new channel state. We also require that the short channel ID not be
// the all-zero source ID, meaning that the channel has had its ID finalized.
func (l *channelLink) EligibleToForward() bool {
	return l.channel.RemoteNextRevocation() != nil &&
		l.ShortChanID() != hop.Source &&
		l.isReestablished()
}

// isReestablished returns true if the link has successfully completed the
// channel reestablishment dance.
func (l *channelLink) isReestablished() bool {
	return atomic.LoadInt32(&l.reestablished) == 1
}

// markReestablished signals that the remote peer has successfully exchanged
// channel reestablish messages and that the channel is ready to process
// subsequent messages.
func (l *channelLink) markReestablished() {
	atomic.StoreInt32(&l.reestablished, 1)
}

// sampleNetworkFee samples the current fee rate on the network to get into the
// chain in a timely manner. The returned value is expressed in atoms-per-kB, as
// this is the native rate used when computing the fee for commitment
// transactions, and the second-level HTLC transactions.
func (l *channelLink) sampleNetworkFee() (chainfee.AtomPerKByte, error) {
	// We'll first query for the atoms/kB recommended to be confirmed
	// within 3 blocks.
	feePerKB, err := l.cfg.FeeEstimator.EstimateFeePerKB(3)
	if err != nil {
		return 0, err
	}

	l.log.Debugf("sampled fee rate for 3 block conf: %s",
		feePerKB)

	return feePerKB, nil
}

// shouldAdjustCommitFee returns true if we should update our commitment fee to
// match that of the network fee. We'll only update our commitment fee if the
// network fee differs from our commitment fee by at least 10%, or if our
// current commitment fee is below the minimum relay fee.
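//
// For example, with chanFee = 10,000 atoms/kB and minRelayFee = 1,000
// atoms/kB, a netFee of 11,000 atoms/kB (a full 10% increase) triggers an
// update, while a netFee of 10,500 atoms/kB does not.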
func shouldAdjustCommitFee(netFee, chanFee,
	minRelayFee chainfee.AtomPerKByte) bool {

	switch {
	// If the network fee is greater than our current commitment fee and
	// our current commitment fee is below the minimum relay fee then
	// we should switch to it no matter if it is less than a 10% increase.
	case netFee > chanFee && chanFee < minRelayFee:
		return true

	// If the network fee is greater than the commitment fee, then we'll
	// switch to it if it's at least 10% greater than the commit fee.
	case netFee > chanFee && netFee >= (chanFee+(chanFee*10)/100):
		return true

	// If the network fee is less than our commitment fee, then we'll
	// switch to it if it's at least 10% less than the commitment fee.
	case netFee < chanFee && netFee <= (chanFee-(chanFee*10)/100):
		return true

	// Otherwise, we won't modify our fee.
	default:
		return false
	}
}

// createFailureWithUpdate retrieves this link's last channel update message and
// passes it into the callback. It expects a fully populated failure message.
func (l *channelLink) createFailureWithUpdate(
	cb func(update *lnwire.ChannelUpdate) lnwire.FailureMessage) lnwire.FailureMessage {

	update, err := l.cfg.FetchLastChannelUpdate(l.ShortChanID())
	if err != nil {
		return &lnwire.FailTemporaryNodeFailure{}
	}

	return cb(update)
}

// syncChanStates attempts to synchronize channel states with the remote party.
// This method is to be called upon reconnection after the initial funding
// flow. We'll compare our commitment chains with the remote party, and re-send
// either a dangling commit signature, a revocation, or both.
func (l *channelLink) syncChanStates() error {
	l.log.Info("attempting to re-synchronize")

	// First, we'll generate our ChanSync message to send to the other
	// side. Based on this message, the remote party will decide if they
	// need to retransmit any data or not.
	chanState := l.channel.State()
	localChanSyncMsg, err := chanState.ChanSyncMsg()
	if err != nil {
		return fmt.Errorf("unable to generate chan sync message for "+
			"ChannelPoint(%v)", l.channel.ChannelPoint())
	}

	if err := l.cfg.Peer.SendMessage(true, localChanSyncMsg); err != nil {
		return fmt.Errorf("unable to send chan sync message for "+
			"ChannelPoint(%v): %v", l.channel.ChannelPoint(), err)
	}

	// Track how long the peer is online while NOT sending a
	// ChannelReestablish.
	sendTime := time.Now()

	var msgsToReSend []lnwire.Message

	// Next, we'll wait indefinitely to receive the ChanSync message. The
	// first message sent MUST be the ChanSync message.
	select {
	case msg := <-l.upstream:
		remoteChanSyncMsg, ok := msg.(*lnwire.ChannelReestablish)
		if !ok {
			return fmt.Errorf("first message sent to sync "+
				"should be ChannelReestablish, instead "+
				"received: %T", msg)
		}

		// If the remote party indicates that they think we haven't
		// done any state updates yet, then we'll retransmit the
		// funding locked message first. We do this, as at this point
		// we can't be sure if they've really received the
		// FundingLocked message.
		if remoteChanSyncMsg.NextLocalCommitHeight == 1 &&
			localChanSyncMsg.NextLocalCommitHeight == 1 &&
			!l.channel.IsPending() {

			l.log.Infof("resending FundingLocked message to peer")

			nextRevocation, err := l.channel.NextRevocationKey()
			if err != nil {
				return fmt.Errorf("unable to create next "+
					"revocation: %v", err)
			}

			fundingLockedMsg := lnwire.NewFundingLocked(
				l.ChanID(), nextRevocation,
			)
			err = l.cfg.Peer.SendMessage(false, fundingLockedMsg)
			if err != nil {
				return fmt.Errorf("unable to re-send "+
					"FundingLocked: %v", err)
			}
		}

		// In any case, we'll then process their ChanSync message.
		l.log.Info("received re-establishment message from remote side")

		var (
			openedCircuits []CircuitKey
			closedCircuits []CircuitKey
		)

		// We've just received a ChanSync message from the remote
		// party, so we'll process the message in order to determine
		// if we need to re-transmit any messages to the remote party.
		msgsToReSend, openedCircuits, closedCircuits, err =
			l.channel.ProcessChanSyncMsg(remoteChanSyncMsg)
		if err != nil {
			return err
		}

		l.log.Debugf("Processed chan sync msg: %d msgsToReSend, "+
			"%d openedCircuits, %d closedCircuits", len(msgsToReSend),
			len(openedCircuits), len(closedCircuits))

		// Repopulate any identifiers for circuits that may have been
		// opened or unclosed. This may happen if we needed to
		// retransmit a commitment signature message.
		l.openedCircuits = openedCircuits
		l.closedCircuits = closedCircuits

		// Ensure that all packets have been removed from the link's
		// mailbox.
		if err := l.ackDownStreamPackets(); err != nil {
			return err
		}

		if len(msgsToReSend) > 0 {
			l.log.Infof("sending %v updates to synchronize the "+
				"state", len(msgsToReSend))
		}

		// If we have any messages to retransmit, we'll do so
		// immediately so we return to a synchronized state as soon as
		// possible.
		for _, msg := range msgsToReSend {
			l.cfg.Peer.SendMessage(false, msg)
		}

		// Channel reestablish successfully received, so reset the wait
		// time.
		err := l.cfg.ResetChanReestablishWaitTime(l.shortChanID)
		if err != nil {
			l.log.Errorf("Unable to reset ChannelReestablish wait "+
				"time: %v", err)
		}

	case <-l.quit:
		// Channel reestablish not received while the peer was online.
		// Track how long we waited for a ChannelReestablish message.
		waitTime := time.Since(sendTime)
		l.log.Debugf("Adding +%s to channel reestablish wait time", waitTime)
		err := l.cfg.AddToChanReestablishWaitTime(l.shortChanID, waitTime)
		if err != nil {
			l.log.Errorf("Unable to track wait time for "+
				"ChannelReestablish msg: %v", err)
		}

		return ErrLinkShuttingDown
	}

	return nil
}

// resolveFwdPkgs loads any forwarding packages for this link from disk, and
// reprocesses them in order. The primary goal is to make sure that any HTLCs
// we previously received are reinstated in memory, and forwarded to the switch
// if necessary. After a restart, this will also delete any previously
// completed packages.
func (l *channelLink) resolveFwdPkgs() error {
	fwdPkgs, err := l.channel.LoadFwdPkgs()
	if err != nil {
		return err
	}

	l.log.Debugf("loaded %d fwd pkgs", len(fwdPkgs))

	for _, fwdPkg := range fwdPkgs {
		if err := l.resolveFwdPkg(fwdPkg); err != nil {
			return err
		}
	}

	// If any of our reprocessing steps require an update to the commitment
	// txn, we initiate a state transition to capture all relevant changes.
	if l.channel.PendingLocalUpdateCount() > 0 {
		return l.updateCommitTx()
	}

	return nil
}

// resolveFwdPkg interprets the FwdState of the provided package, and either
// reprocesses any outstanding htlcs in the package, or performs garbage
// collection on the package.
func (l *channelLink) resolveFwdPkg(fwdPkg *channeldb.FwdPkg) error {
	// Remove any completed packages to clear up space.
	if fwdPkg.State == channeldb.FwdStateCompleted {
		l.log.Debugf("removing completed fwd pkg for height=%d",
			fwdPkg.Height)

		err := l.channel.RemoveFwdPkgs(fwdPkg.Height)
		if err != nil {
			l.log.Errorf("unable to remove fwd pkg for height=%d: "+
				"%v", fwdPkg.Height, err)
			return err
		}
	}

	// Otherwise this is either a new package or one that has gone through
	// processing, but contains htlcs that need to be restored in memory.
	// We replay this forwarding package to make sure our local mem state
	// is resurrected, we mimic any original responses back to the remote
	// party, and re-forward the relevant HTLCs to the switch.

	// If the package is fully acked but not completed, it must still have
	// settles and fails to propagate.
	if !fwdPkg.SettleFailFilter.IsFull() {
		settleFails, err := lnwallet.PayDescsFromRemoteLogUpdates(
			fwdPkg.Source, fwdPkg.Height, fwdPkg.SettleFails,
		)
		if err != nil {
			l.log.Errorf("unable to process remote log updates: %v",
				err)
			return err
		}
		l.processRemoteSettleFails(fwdPkg, settleFails)
	}

	// Finally, replay *ALL ADDS* in this forwarding package. The
	// downstream logic is able to filter out any duplicates, but we must
	// shove the entire, original set of adds down the pipeline so that the
	// batch of adds presented to the sphinx router does not ever change.
	if !fwdPkg.AckFilter.IsFull() {
		adds, err := lnwallet.PayDescsFromRemoteLogUpdates(
			fwdPkg.Source, fwdPkg.Height, fwdPkg.Adds,
		)
		if err != nil {
			l.log.Errorf("unable to process remote log updates: %v",
				err)
			return err
		}
		l.processRemoteAdds(fwdPkg, adds)

		// If the link failed while processing the adds, we must
		// return to ensure we won't attempt to update the state
		// further.
		if l.failed {
			return fmt.Errorf("link failed while " +
				"processing remote adds")
		}
	}

	return nil
}

// fwdPkgGarbager periodically reads all forwarding packages from disk and
// removes those that can be discarded. It is safe to do this entirely in the
// background, since all state is coordinated on disk. This also ensures the
// link can continue to process messages and interleave database accesses.
//
// NOTE: This MUST be run as a goroutine.
func (l *channelLink) fwdPkgGarbager() {
	defer l.wg.Done()

	l.cfg.FwdPkgGCTicker.Resume()
	defer l.cfg.FwdPkgGCTicker.Stop()

	if err := l.loadAndRemove(); err != nil {
		l.log.Warnf("unable to run initial fwd pkgs gc: %v", err)
	}

	for {
		select {
		case <-l.cfg.FwdPkgGCTicker.Ticks():
			if err := l.loadAndRemove(); err != nil {
				l.log.Warnf("unable to remove fwd pkgs: %v",
					err)
				continue
			}
		case <-l.quit:
			return
		}
	}
}

// loadAndRemove loads all of the channel's forwarding packages and determines
// if they can be removed. It is called once before the FwdPkgGCTicker ticks so
// that a longer tick interval can be used.
func (l *channelLink) loadAndRemove() error {
	fwdPkgs, err := l.channel.LoadFwdPkgs()
	if err != nil {
		return err
	}

	var removeHeights []uint64
	for _, fwdPkg := range fwdPkgs {
		if fwdPkg.State != channeldb.FwdStateCompleted {
			continue
		}

		removeHeights = append(removeHeights, fwdPkg.Height)
	}

	// If removeHeights is empty, return early so we don't use a db
	// transaction.
	if len(removeHeights) == 0 {
		return nil
	}

	return l.channel.RemoveFwdPkgs(removeHeights...)
}

// htlcManager is the primary goroutine which drives a channel's commitment
// update state-machine in response to messages received via several channels.
// This goroutine reads messages from the upstream (remote) peer, and also from
// the downstream channel managed by the channel link. In the event that an
// htlc needs to be forwarded, then the send-only forward handler is used,
// which sends htlc packets to the switch. Additionally, this goroutine
// handles acting upon all timeouts for any active HTLCs, manages the
// channel's revocation window, and also the htlc trickle queue+timer for this
// active channel.
//
// NOTE: This MUST be run as a goroutine.
func (l *channelLink) htlcManager() {
	defer func() {
		l.cfg.BatchTicker.Stop()
		l.wg.Done()
		l.log.Infof("exited")
	}()

	l.log.Infof("HTLC manager started, bandwidth=%v", l.Bandwidth())

	// Notify any clients that the link is now in the switch via an
	// ActiveLinkEvent.
	l.cfg.NotifyActiveLink(*l.ChannelPoint())

	// TODO(roasbeef): need to call wipe chan whenever D/C?

	// If this isn't the first time that this channel link has been
	// created, then we'll need to check to see if we need to
	// re-synchronize state with the remote peer.
	if l.cfg.SyncStates {
		err := l.syncChanStates()
		if err != nil {
			l.log.Warnf("error when syncing channel states: %v", err)

			errDataLoss, localDataLoss :=
				err.(*lnwallet.ErrCommitSyncLocalDataLoss)

			switch {
			case err == ErrLinkShuttingDown:
				l.log.Debugf("unable to sync channel states, " +
					"link is shutting down")
				return

			// We failed syncing the commit chains, probably
			// because the remote has lost state. We should force
			// close the channel.
			case err == lnwallet.ErrCommitSyncRemoteDataLoss:
				fallthrough

			// The remote sent us an invalid last commit secret, we
			// should force close the channel.
			// TODO(halseth): and permanently ban the peer?
			case err == lnwallet.ErrInvalidLastCommitSecret:
				fallthrough

			// The remote sent us a commit point different from
			// what they sent us before.
			// TODO(halseth): ban peer?
			case err == lnwallet.ErrInvalidLocalUnrevokedCommitPoint:
				// We'll fail the link and tell the peer to
				// force close the channel. Note that the
				// database state is not updated here, but will
				// be updated when the close transaction is
				// ready to avoid that we go down before
				// storing the transaction in the db.
				l.fail(
					LinkFailureError{
						code:       ErrSyncError,
						ForceClose: true,
					},
					"unable to synchronize channel "+
						"states: %v", err,
				)
				return

			// We have lost state and cannot safely force close the
			// channel. Fail the channel and wait for the remote to
			// hopefully force close it. The remote has sent us its
			// latest unrevoked commitment point, and we'll store
			// it in the database, such that we can attempt to
			// recover the funds if the remote force closes the
			// channel.
			case localDataLoss:
				err := l.channel.MarkDataLoss(
					errDataLoss.CommitPoint,
				)
				if err != nil {
					l.log.Errorf("unable to mark channel "+
						"data loss: %v", err)
				}

			// We determined the commit chains were not possible to
			// sync. We cautiously fail the channel, but don't
			// force close.
			// TODO(halseth): can we safely force close in any
			// cases where this error is returned?
			case err == lnwallet.ErrCannotSyncCommitChains:
				if err := l.channel.MarkBorked(); err != nil {
					l.log.Errorf("unable to mark channel "+
						"borked: %v", err)
				}

			// Other, unspecified error.
			default:
			}

			l.fail(
				LinkFailureError{
					code:       ErrRecoveryError,
					ForceClose: false,
				},
				"unable to synchronize channel "+
					"states: %v", err,
			)
			return
		}
	}

	// We've successfully reestablished the channel, mark it as such to
	// allow the switch to forward HTLCs in the outbound direction.
	l.markReestablished()

	// Now that we've received both funding locked and channel reestablish,
	// we can go ahead and send the active channel notification. We'll also
	// defer the inactive notification for when the link exits to ensure
	// that every active notification is matched by an inactive one.
	l.cfg.NotifyActiveChannel(*l.ChannelPoint())
	defer l.cfg.NotifyInactiveChannel(*l.ChannelPoint())

	// With the channel states synced, we now reset the mailbox to ensure
	// we start processing all unacked packets in order. This is done here
	// to ensure that all acknowledgments that occur during channel
	// resynchronization have taken effect, causing us only to pull unacked
	// packets after starting to read from the downstream mailbox.
	l.mailBox.ResetPackets()

	// After cleaning up any memory pertaining to incoming packets, we now
	// replay our forwarding packages to handle any htlcs that can be
	// processed locally, or need to be forwarded out to the switch. We will
	// only attempt to resolve packages if our short chan id indicates that
	// the channel is not pending, otherwise we should have no htlcs to
	// reforward.
	if l.ShortChanID() != hop.Source {
		if err := l.resolveFwdPkgs(); err != nil {
			l.fail(LinkFailureError{code: ErrInternalError},
				"unable to resolve fwd pkgs: %v", err)
			return
		}

		// With our link's in-memory state fully reconstructed, spawn a
		// goroutine to manage the reclamation of disk space occupied by
		// completed forwarding packages.
		l.wg.Add(1)
		go l.fwdPkgGarbager()
	}

	for {
		// We must always check if we failed at some point processing
		// the last update before processing the next.
		if l.failed {
			l.log.Errorf("link failed, exiting htlcManager")
			return
		}

		// If the previous event resulted in a non-empty batch, resume
		// the batch ticker so that it can be cleared. Otherwise pause
		// the ticker to prevent waking up the htlcManager while the
		// batch is empty.
		if l.channel.PendingLocalUpdateCount() > 0 {
			l.cfg.BatchTicker.Resume()
		} else {
			l.cfg.BatchTicker.Pause()
		}

		select {
		// Our update fee timer has fired, so we'll check the network
		// fee to see if we should adjust our commitment fee.
		case <-l.updateFeeTimer.C:
			l.updateFeeTimer.Reset(l.randomFeeUpdateTimeout())

			// If we're not the initiator of the channel, we don't
			// control the fees, so we can ignore this.
			if !l.channel.IsInitiator() {
				continue
			}

			// If we are the initiator, then we'll sample the
			// current fee rate to get into the chain within 3
			// blocks.
			netFee, err := l.sampleNetworkFee()
			if err != nil {
				l.log.Errorf("unable to sample network fee: %v",
					err)
				continue
			}

			minRelayFee := l.cfg.FeeEstimator.RelayFeePerKB()

			newCommitFee := l.channel.IdealCommitFeeRate(
				netFee, minRelayFee,
				l.cfg.MaxAnchorsCommitFeeRate,
				l.cfg.MaxFeeAllocation,
			)

			// We determine if we should adjust the commitment fee
			// based on the current commitment fee, the suggested
			// new commitment fee and the current minimum relay fee
			// rate.
			commitFee := l.channel.CommitFeeRate()
			if !shouldAdjustCommitFee(
				newCommitFee, commitFee, minRelayFee,
			) {
				continue
			}

			// If we do, then we'll send a new UpdateFee message to
			// the remote party, to be locked in with a new update.
			if err := l.updateChannelFee(newCommitFee); err != nil {
				l.log.Errorf("unable to update fee rate: %v",
					err)
				continue
			}

		// The underlying channel has notified us of a unilateral close
		// carried out by the remote peer. In the case of such an
		// event, we'll wipe the channel state from the peer, and mark
		// the contract as fully settled. Afterwards we can exit.
		//
		// TODO(roasbeef): add force closure? also breach?
		case <-l.cfg.ChainEvents.RemoteUnilateralClosure:
			l.log.Warnf("remote peer has closed on-chain")

			// TODO(roasbeef): remove all together
			go func() {
				chanPoint := l.channel.ChannelPoint()
				l.cfg.Peer.WipeChannel(chanPoint)
			}()

			return

		case <-l.cfg.BatchTicker.Ticks():
			// Attempt to extend the remote commitment chain
			// including all the currently pending entries. If the
			// send was unsuccessful, then abandon the update,
			// waiting for the revocation window to open up.
			if !l.updateCommitTxOrFail() {
				return
			}

		case <-l.cfg.PendingCommitTicker.Ticks():
			l.fail(LinkFailureError{code: ErrRemoteUnresponsive},
				"unable to complete dance")
			return

		// A message from the switch was just received. This indicates
		// that the link is an intermediate hop in a multi-hop HTLC
		// circuit.
		case pkt := <-l.downstream:
			l.handleDownstreamPkt(pkt)

		// A message containing a locally initiated add was received.
		case msg := <-l.localUpdateAdd:
			msg.err <- l.handleDownstreamUpdateAdd(msg.pkt)

		// A message from the connected peer was just received. This
		// indicates that we have a new incoming HTLC, either directly
		// for us, or part of a multi-hop HTLC circuit.
		case msg := <-l.upstream:
			l.handleUpstreamMsg(msg)

		// A htlc resolution is received. This means that we now have a
		// resolution for a previously accepted htlc.
		case hodlItem := <-l.hodlQueue.ChanOut():
			htlcResolution := hodlItem.(invoices.HtlcResolution)
			err := l.processHodlQueue(htlcResolution)
			if err != nil {
				l.fail(LinkFailureError{code: ErrInternalError},
					fmt.Sprintf("process hodl queue: %v",
						err.Error()),
				)
				return
			}

		case req := <-l.shutdownRequest:
			// If the channel is clean, we send nil on the err chan
			// and return to prevent the htlcManager goroutine from
			// processing any more updates. The full link shutdown
			// will be triggered by RemoveLink in the peer.
			uncleanReasons := l.channel.ReasonsChannelUnclean()
			if uncleanReasons == "" {
				req.err <- nil
				return
			}

			l.log.Warnf("Link cannot be shutdown due to reasons: %s",
				uncleanReasons)

			// Otherwise, the channel has lingering updates, send
			// an error and continue.
			req.err <- ErrLinkFailedShutdown

		case <-l.quit:
			return
		}
	}
}
  1262  
  1263  // processHodlQueue processes a received htlc resolution and continues reading
  1264  // from the hodl queue until no more resolutions remain. When this function
  1265  // returns without an error, the commit tx should be updated.
  1266  func (l *channelLink) processHodlQueue(
  1267  	firstResolution invoices.HtlcResolution) error {
  1268  
  1269  	// Try to read all waiting resolution messages, so that they can all be
  1270  	// processed in a single commitment tx update.
  1271  	htlcResolution := firstResolution
  1272  loop:
  1273  	for {
  1274  		// Lookup all hodl htlcs that can be failed or settled with this event.
  1275  		// The hodl htlc must be present in the map.
  1276  		circuitKey := htlcResolution.CircuitKey()
  1277  		hodlHtlc, ok := l.hodlMap[circuitKey]
  1278  		if !ok {
  1279  			return fmt.Errorf("hodl htlc not found: %v", circuitKey)
  1280  		}
  1281  
  1282  		if err := l.processHtlcResolution(htlcResolution, hodlHtlc); err != nil {
  1283  			return err
  1284  		}
  1285  
  1286  		// Clean up hodl map.
  1287  		delete(l.hodlMap, circuitKey)
  1288  
  1289  		select {
  1290  		case item := <-l.hodlQueue.ChanOut():
  1291  			htlcResolution = item.(invoices.HtlcResolution)
  1292  		default:
  1293  			break loop
  1294  		}
  1295  	}
  1296  
  1297  	// Update the commitment tx.
  1298  	if err := l.updateCommitTx(); err != nil {
  1299  		return fmt.Errorf("unable to update commitment: %v", err)
  1300  	}
  1301  
  1302  	return nil
  1303  }
  1304  
  1305  // processHtlcResolution applies a received htlc resolution to the provided
  1306  // htlc. When this function returns without an error, the commit tx should be
  1307  // updated.
  1308  func (l *channelLink) processHtlcResolution(resolution invoices.HtlcResolution,
  1309  	htlc hodlHtlc) error {
  1310  
  1311  	circuitKey := resolution.CircuitKey()
  1312  
  1313  	// Determine required action for the resolution based on the type of
  1314  	// resolution we have received.
  1315  	switch res := resolution.(type) {
  1316  	// Settle htlcs that returned a settle resolution using the preimage
  1317  	// in the resolution.
  1318  	case *invoices.HtlcSettleResolution:
  1319  		l.log.Debugf("received settle resolution for %v "+
  1320  			"with outcome: %v", circuitKey, res.Outcome)
  1321  
  1322  		return l.settleHTLC(res.Preimage, htlc.pd)
  1323  
  1324  	// For htlc failures, we get the relevant failure message based
  1325  	// on the failure resolution and then fail the htlc.
  1326  	case *invoices.HtlcFailResolution:
  1327  		l.log.Debugf("received cancel resolution for "+
  1328  			"%v with outcome: %v", circuitKey, res.Outcome)
  1329  
  1330  		// Get the lnwire failure message based on the resolution
  1331  		// result.
  1332  		failure := getResolutionFailure(res, htlc.pd.Amount)
  1333  
  1334  		l.sendHTLCError(
  1335  			htlc.pd, failure, htlc.obfuscator, true,
  1336  		)
  1337  		return nil
  1338  
  1339  	// Fail if we do not get a settle or fail resolution, since we
  1340  	// are only expecting to handle settles and fails.
  1341  	default:
  1342  		return fmt.Errorf("unknown htlc resolution type: %T",
  1343  			resolution)
  1344  	}
  1345  }
  1346  
  1347  // getResolutionFailure returns the wire message that an htlc resolution
  1348  // should be failed with.
  1349  func getResolutionFailure(resolution *invoices.HtlcFailResolution,
  1350  	amount lnwire.MilliAtom) *LinkError {
  1351  
  1352  	// If the resolution has been resolved as part of an MPP timeout,
  1353  	// we need to fail the htlc with lnwire.FailMppTimeout.
  1354  	if resolution.Outcome == invoices.ResultMppTimeout {
  1355  		return NewDetailedLinkError(
  1356  			&lnwire.FailMPPTimeout{}, resolution.Outcome,
  1357  		)
  1358  	}
  1359  
  1360  	// If the htlc is not an MPP timeout, we fail it with
  1361  	// FailIncorrectDetails. This error is sent for invoice payment
  1362  	// failures such as underpayment or expiry too soon, and hodl invoices
  1363  	// (which return FailIncorrectDetails to avoid leaking information).
  1364  	incorrectDetails := lnwire.NewFailIncorrectDetails(
  1365  		amount, uint32(resolution.AcceptHeight),
  1366  	)
  1367  
  1368  	return NewDetailedLinkError(incorrectDetails, resolution.Outcome)
  1369  }
  1370  
  1371  // randomFeeUpdateTimeout returns a random timeout between the bounds defined
  1372  // within the link's configuration that will be used to determine when the link
  1373  // should propose an update to its commitment fee rate.
  1374  func (l *channelLink) randomFeeUpdateTimeout() time.Duration {
  1375  	lower := int64(l.cfg.MinFeeUpdateTimeout)
  1376  	upper := int64(l.cfg.MaxFeeUpdateTimeout)
  1377  	return time.Duration(prand.Int63n(upper-lower) + lower)
  1378  }
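
// With the default bounds of 10 and 60 minutes, the expression above is
// uniformly distributed over [MinFeeUpdateTimeout, MaxFeeUpdateTimeout).
// A hedged, standalone sketch of the same sampling using the package's
// default constants (exampleFeeUpdateTimeout is illustrative only):
func exampleFeeUpdateTimeout() time.Duration {
	lower := int64(DefaultMinLinkFeeUpdateTimeout) // 10 minutes
	upper := int64(DefaultMaxLinkFeeUpdateTimeout) // 60 minutes

	// prand.Int63n(n) returns a value in [0, n), so adding lower keeps
	// the result within [lower, upper).
	return time.Duration(prand.Int63n(upper-lower) + lower)
}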
  1379  
  1380  // handleDownstreamUpdateAdd processes an UpdateAddHTLC packet sent from the
  1381  // downstream HTLC Switch.
  1382  func (l *channelLink) handleDownstreamUpdateAdd(pkt *htlcPacket) error {
  1383  	htlc, ok := pkt.htlc.(*lnwire.UpdateAddHTLC)
  1384  	if !ok {
  1385  		return errors.New("not an UpdateAddHTLC packet")
  1386  	}
  1387  
  1388  	// If hodl.AddOutgoing mode is active, we exit early to simulate
  1389  	// arbitrary delays between the switch adding an ADD to the
  1390  	// mailbox, and the HTLC being added to the commitment state.
  1391  	if l.cfg.HodlMask.Active(hodl.AddOutgoing) {
  1392  		l.log.Warnf(hodl.AddOutgoing.Warning())
  1393  		l.mailBox.AckPacket(pkt.inKey())
  1394  		return nil
  1395  	}
  1396  
  1397  	// A new payment has been initiated via the downstream channel,
  1398  	// so we add the new HTLC to our local log, then update the
  1399  	// commitment chains.
  1400  	htlc.ChanID = l.ChanID()
  1401  	openCircuitRef := pkt.inKey()
  1402  	index, err := l.channel.AddHTLC(htlc, &openCircuitRef)
  1403  	if err != nil {
  1404  		// The HTLC was unable to be added to the state machine; as
  1405  		// a result, we'll signal the switch to cancel the pending
  1406  		// payment.
  1407  		l.log.Warnf("Unable to handle downstream add HTLC: %v",
  1408  			err)
  1409  
  1410  		// Remove this packet from the link's mailbox; this
  1411  		// prevents it from being reprocessed if the link
  1412  		// restarts and resets its mailbox. If this response
  1413  		// doesn't make it back to the originating link, it will
  1414  		// be rejected upon attempting to reforward the Add to
  1415  		// the switch, since the circuit was never fully opened,
  1416  		// and the forwarding package shows it as
  1417  		// unacknowledged.
  1418  		l.mailBox.FailAdd(pkt)
  1419  
  1420  		return NewDetailedLinkError(
  1421  			lnwire.NewTemporaryChannelFailure(nil),
  1422  			OutgoingFailureDownstreamHtlcAdd,
  1423  		)
  1424  	}
  1425  
  1426  	l.log.Tracef("received downstream htlc: payment_hash=%x, "+
  1427  		"local_log_index=%v, pend_updates=%v",
  1428  		htlc.PaymentHash[:], index,
  1429  		l.channel.PendingLocalUpdateCount())
  1430  
  1431  	pkt.outgoingChanID = l.ShortChanID()
  1432  	pkt.outgoingHTLCID = index
  1433  	htlc.ID = index
  1434  
  1435  	l.log.Debugf("queueing keystone of ADD open circuit: %s->%s",
  1436  		pkt.inKey(), pkt.outKey())
  1437  
  1438  	l.openedCircuits = append(l.openedCircuits, pkt.inKey())
  1439  	l.keystoneBatch = append(l.keystoneBatch, pkt.keystone())
  1440  
  1441  	_ = l.cfg.Peer.SendMessage(false, htlc)
  1442  
  1443  	// Send a forward event notification to htlcNotifier.
  1444  	l.cfg.HtlcNotifier.NotifyForwardingEvent(
  1445  		newHtlcKey(pkt),
  1446  		HtlcInfo{
  1447  			IncomingTimeLock: pkt.incomingTimeout,
  1448  			IncomingAmt:      pkt.incomingAmount,
  1449  			OutgoingTimeLock: htlc.Expiry,
  1450  			OutgoingAmt:      htlc.Amount,
  1451  		},
  1452  		getEventType(pkt),
  1453  	)
  1454  
  1455  	l.tryBatchUpdateCommitTx()
  1456  
  1457  	return nil
  1458  }
  1459  
  1460  // handleDownstreamPkt processes an HTLC packet sent from the downstream HTLC
  1461  // Switch. Possible messages sent by the switch include requests to forward new
  1462  // HTLCs, timeout previously cleared HTLCs, and finally to settle currently
  1463  // cleared HTLCs with the upstream peer.
  1464  //
  1465  // TODO(roasbeef): add sync ntfn to ensure switch always has consistent view?
  1466  func (l *channelLink) handleDownstreamPkt(pkt *htlcPacket) {
  1467  	switch htlc := pkt.htlc.(type) {
  1468  	case *lnwire.UpdateAddHTLC:
  1469  		// Handle add message. The returned error can be ignored,
  1470  		// because it is also sent through the mailbox.
  1471  		_ = l.handleDownstreamUpdateAdd(pkt)
  1472  
  1473  	case *lnwire.UpdateFulfillHTLC:
  1474  		// If hodl.SettleOutgoing mode is active, we exit early to
  1475  		// simulate arbitrary delays between the switch adding the
  1476  		// SETTLE to the mailbox, and the HTLC being added to the
  1477  		// commitment state.
  1478  		if l.cfg.HodlMask.Active(hodl.SettleOutgoing) {
  1479  			l.log.Warnf(hodl.SettleOutgoing.Warning())
  1480  			l.mailBox.AckPacket(pkt.inKey())
  1481  			return
  1482  		}
  1483  
  1484  		// An HTLC we forwarded to the switch has just been settled
  1485  		// somewhere upstream. Therefore we settle the HTLC within our
  1486  		// local state machine.
  1487  		inKey := pkt.inKey()
  1488  		err := l.channel.SettleHTLC(
  1489  			htlc.PaymentPreimage,
  1490  			pkt.incomingHTLCID,
  1491  			pkt.sourceRef,
  1492  			pkt.destRef,
  1493  			&inKey,
  1494  		)
  1495  		if err != nil {
  1496  			l.log.Errorf("unable to settle incoming HTLC for "+
  1497  				"circuit-key=%v: %v", inKey, err)
  1498  
  1499  			// If the HTLC index for Settle response was not known
  1500  			// to our commitment state, it has already been
  1501  			// cleaned up by a prior response. We'll thus try to
  1502  			// clean up any lingering state to ensure we don't
  1503  			// continue reforwarding.
  1504  			if _, ok := err.(lnwallet.ErrUnknownHtlcIndex); ok {
  1505  				l.cleanupSpuriousResponse(pkt)
  1506  			}
  1507  
  1508  			// Remove the packet from the link's mailbox to ensure
  1509  			// it doesn't get replayed after a reconnection.
  1510  			l.mailBox.AckPacket(inKey)
  1511  
  1512  			return
  1513  		}
  1514  
  1515  		l.log.Debugf("queueing removal of SETTLE closed circuit: "+
  1516  			"%s->%s", pkt.inKey(), pkt.outKey())
  1517  
  1518  		l.closedCircuits = append(l.closedCircuits, pkt.inKey())
  1519  
  1520  		// With the HTLC settled, we'll need to populate the wire
  1521  		// message to target the specific channel and HTLC to be
  1522  		// canceled.
  1523  		htlc.ChanID = l.ChanID()
  1524  		htlc.ID = pkt.incomingHTLCID
  1525  
  1526  		// Then we send the HTLC settle message to the connected peer
  1527  		// so we can continue the propagation of the settle message.
  1528  		l.cfg.Peer.SendMessage(false, htlc)
  1529  
  1530  		// Send a settle event notification to htlcNotifier.
  1531  		l.cfg.HtlcNotifier.NotifySettleEvent(
  1532  			newHtlcKey(pkt),
  1533  			htlc.PaymentPreimage,
  1534  			getEventType(pkt),
  1535  		)
  1536  
  1537  		// Immediately update the commitment tx to minimize latency.
  1538  		l.updateCommitTxOrFail()
  1539  
  1540  	case *lnwire.UpdateFailHTLC:
  1541  		// If hodl.FailOutgoing mode is active, we exit early to
  1542  		// simulate arbitrary delays between the switch adding a FAIL to
  1543  		// the mailbox, and the HTLC being added to the commitment
  1544  		// state.
  1545  		if l.cfg.HodlMask.Active(hodl.FailOutgoing) {
  1546  			l.log.Warnf(hodl.FailOutgoing.Warning())
  1547  			l.mailBox.AckPacket(pkt.inKey())
  1548  			return
  1549  		}
  1550  
  1551  		// An HTLC cancellation has been triggered somewhere upstream,
  1552  		// so we'll remove the HTLC from our local state machine.
  1553  		inKey := pkt.inKey()
  1554  		err := l.channel.FailHTLC(
  1555  			pkt.incomingHTLCID,
  1556  			htlc.Reason,
  1557  			pkt.sourceRef,
  1558  			pkt.destRef,
  1559  			&inKey,
  1560  		)
  1561  		if err != nil {
  1562  			l.log.Errorf("unable to cancel incoming HTLC for "+
  1563  				"circuit-key=%v: %v", inKey, err)
  1564  
  1565  			// If the HTLC index for Fail response was not known to
  1566  			// our commitment state, it has already been cleaned up
  1567  			// by a prior response. We'll thus try to clean up any
  1568  			// lingering state to ensure we don't continue
  1569  			// reforwarding.
  1570  			if _, ok := err.(lnwallet.ErrUnknownHtlcIndex); ok {
  1571  				l.cleanupSpuriousResponse(pkt)
  1572  			}
  1573  
  1574  			// Remove the packet from the link's mailbox to ensure
  1575  			// it doesn't get replayed after a reconnection.
  1576  			l.mailBox.AckPacket(inKey)
  1577  
  1578  			return
  1579  		}
  1580  
  1581  		l.log.Debugf("queueing removal of FAIL closed circuit: %s->%s",
  1582  			pkt.inKey(), pkt.outKey())
  1583  
  1584  		l.closedCircuits = append(l.closedCircuits, pkt.inKey())
  1585  
  1586  		// With the HTLC removed, we'll need to populate the wire
  1587  		// message to target the specific channel and HTLC to be
  1588  		// canceled. The "Reason" field will have already been set
  1589  		// within the switch.
  1590  		htlc.ChanID = l.ChanID()
  1591  		htlc.ID = pkt.incomingHTLCID
  1592  
  1593  		// We send the HTLC message to the peer which initially created
  1594  		// the HTLC.
  1595  		l.cfg.Peer.SendMessage(false, htlc)
  1596  
  1597  		// If the packet does not have a link failure set, it failed
  1598  		// further down the route so we notify a forwarding failure.
  1599  		// Otherwise, we notify a link failure because it failed at our
  1600  		// node.
  1601  		if pkt.linkFailure != nil {
  1602  			l.cfg.HtlcNotifier.NotifyLinkFailEvent(
  1603  				newHtlcKey(pkt),
  1604  				newHtlcInfo(pkt),
  1605  				getEventType(pkt),
  1606  				pkt.linkFailure,
  1607  				false,
  1608  			)
  1609  		} else {
  1610  			l.cfg.HtlcNotifier.NotifyForwardingFailEvent(
  1611  				newHtlcKey(pkt), getEventType(pkt),
  1612  			)
  1613  		}
  1614  
  1615  		// Immediately update the commitment tx to minimize latency.
  1616  		l.updateCommitTxOrFail()
  1617  	}
  1618  }
  1619  
  1620  // tryBatchUpdateCommitTx updates the commitment transaction if the batch is
  1621  // full.
  1622  func (l *channelLink) tryBatchUpdateCommitTx() {
  1623  	if l.channel.PendingLocalUpdateCount() < uint64(l.cfg.BatchSize) {
  1624  		return
  1625  	}
  1626  
  1627  	l.updateCommitTxOrFail()
  1628  }
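
// tryBatchUpdateCommitTx amortizes signing cost: queueing an update is
// cheap, while producing a new commitment signature is expensive, so the
// link only signs once BatchSize updates are pending. A hedged sketch of
// the threshold idea in isolation (flushIfFull and its parameters are
// hypothetical, for illustration only):
func flushIfFull(pendingUpdates, batchSize uint64, flush func() error) error {
	if pendingUpdates < batchSize {
		// Not enough queued work yet; keep accumulating so that a
		// single signature covers the whole batch.
		return nil
	}
	return flush()
}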
  1629  
  1630  // cleanupSpuriousResponse attempts to ack any AddRef or SettleFailRef
  1631  // associated with this packet. If successful in doing so, it will also purge
  1632  // the open circuit from the circuit map and remove the packet from the link's
  1633  // mailbox.
  1634  func (l *channelLink) cleanupSpuriousResponse(pkt *htlcPacket) {
  1635  	inKey := pkt.inKey()
  1636  
  1637  	l.log.Debugf("cleaning up spurious response for incoming "+
  1638  		"circuit-key=%v", inKey)
  1639  
  1640  	// If the htlc packet doesn't have a source reference, it is unsafe to
  1641  	// proceed, as skipping this ack may cause the htlc to be reforwarded.
  1642  	if pkt.sourceRef == nil {
  1643  		l.log.Errorf("unable to cleanup response for incoming "+
  1644  			"circuit-key=%v, does not contain source reference",
  1645  			inKey)
  1646  		return
  1647  	}
  1648  
  1649  	// If the source reference is present, we will try to prevent this link
  1650  	// from resending the packet to the switch. To do so, we ack the AddRef
  1651  	// of the incoming HTLC belonging to this link.
  1652  	err := l.channel.AckAddHtlcs(*pkt.sourceRef)
  1653  	if err != nil {
  1654  		l.log.Errorf("unable to ack AddRef for incoming "+
  1655  			"circuit-key=%v: %v", inKey, err)
  1656  
  1657  		// If this operation failed, it is unsafe to attempt removal of
  1658  		// the destination reference or circuit, so we exit early. The
  1659  		// cleanup may proceed with a different packet in the future
  1660  		// that succeeds on this step.
  1661  		return
  1662  	}
  1663  
  1664  	// Now that we know this link will stop retransmitting Adds to the
  1665  	// switch, we can begin to teardown the response reference and circuit
  1666  	// map.
  1667  	//
  1668  	// If the packet includes a destination reference, then a response for
  1669  	// this HTLC was locked into the outgoing channel. Attempt to remove
  1670  	// this reference, so we stop retransmitting the response internally.
  1671  	// Even if this fails, we will proceed in trying to delete the circuit.
  1672  	// When retransmitting responses, the destination references will be
  1673  	// cleaned up if an open circuit is not found in the circuit map.
  1674  	if pkt.destRef != nil {
  1675  		err := l.channel.AckSettleFails(*pkt.destRef)
  1676  		if err != nil {
  1677  			l.log.Errorf("unable to ack SettleFailRef "+
  1678  				"for incoming circuit-key=%v: %v",
  1679  				inKey, err)
  1680  		}
  1681  	}
  1682  
  1683  	l.log.Debugf("deleting circuit for incoming circuit-key=%v", inKey)
  1684  
  1685  	// With all known references acked, we can now safely delete the circuit
  1686  	// from the switch's circuit map, as the state is no longer needed.
  1687  	err = l.cfg.Circuits.DeleteCircuits(inKey)
  1688  	if err != nil {
  1689  		l.log.Errorf("unable to delete circuit for "+
  1690  			"circuit-key=%v: %v", inKey, err)
  1691  	}
  1692  }
  1693  
  1694  // handleUpstreamMsg processes wire messages related to commitment state
  1695  // updates from the upstream peer. The upstream peer is the peer whom we have a
  1696  // direct channel with, updating our respective commitment chains.
  1697  func (l *channelLink) handleUpstreamMsg(msg lnwire.Message) {
  1698  	switch msg := msg.(type) {
  1699  
  1700  	case *lnwire.UpdateAddHTLC:
  1701  		// We just received an add request from an upstream peer, so we
  1702  		// add it to our state machine, then add the HTLC to our
  1703  		// "settle" list in the event that we know the preimage.
  1704  		index, err := l.channel.ReceiveHTLC(msg)
  1705  		if err != nil {
  1706  			l.fail(LinkFailureError{code: ErrInvalidUpdate},
  1707  				"unable to handle upstream add HTLC: %v", err)
  1708  			return
  1709  		}
  1710  
  1711  		l.log.Tracef("receive upstream htlc with payment hash(%x), "+
  1712  			"assigning index: %v", msg.PaymentHash[:], index)
  1713  
  1714  	case *lnwire.UpdateFulfillHTLC:
  1715  		pre := msg.PaymentPreimage
  1716  		idx := msg.ID
  1717  		if err := l.channel.ReceiveHTLCSettle(pre, idx); err != nil {
  1718  			l.fail(
  1719  				LinkFailureError{
  1720  					code:       ErrInvalidUpdate,
  1721  					ForceClose: true,
  1722  				},
  1723  				"unable to handle upstream settle HTLC: %v", err,
  1724  			)
  1725  			return
  1726  		}
  1727  
  1728  		settlePacket := &htlcPacket{
  1729  			outgoingChanID: l.ShortChanID(),
  1730  			outgoingHTLCID: idx,
  1731  			htlc: &lnwire.UpdateFulfillHTLC{
  1732  				PaymentPreimage: pre,
  1733  			},
  1734  		}
  1735  
  1736  		// Add the newly discovered preimage to our growing list of
  1737  		// uncommitted preimages. These will be written to the witness
  1738  		// cache just before accepting the next commitment signature
  1739  		// from the remote peer.
  1740  		l.uncommittedPreimages = append(l.uncommittedPreimages, pre)
  1741  
  1742  		// Pipeline this settle, send it to the switch.
  1743  		go l.forwardBatch(settlePacket)
  1744  
  1745  	case *lnwire.UpdateFailMalformedHTLC:
  1746  		// Convert the failure type encoded within the HTLC fail
  1747  		// message to the proper generic lnwire error code.
  1748  		var failure lnwire.FailureMessage
  1749  		switch msg.FailureCode {
  1750  		case lnwire.CodeInvalidOnionVersion:
  1751  			failure = &lnwire.FailInvalidOnionVersion{
  1752  				OnionSHA256: msg.ShaOnionBlob,
  1753  			}
  1754  		case lnwire.CodeInvalidOnionHmac:
  1755  			failure = &lnwire.FailInvalidOnionHmac{
  1756  				OnionSHA256: msg.ShaOnionBlob,
  1757  			}
  1758  
  1759  		case lnwire.CodeInvalidOnionKey:
  1760  			failure = &lnwire.FailInvalidOnionKey{
  1761  				OnionSHA256: msg.ShaOnionBlob,
  1762  			}
  1763  		default:
  1764  			l.log.Warnf("unexpected failure code received in "+
  1765  				"UpdateFailMalformedHTLC: %v", msg.FailureCode)
  1766  
  1767  			// We don't just pass back the error we received from
  1768  			// our successor. Otherwise we might report a failure
  1769  			// that penalizes us more than needed. If the onion that
  1770  			// we forwarded was correct, the node should have been
  1771  			// able to send back its own failure. The node did not
  1772  			// send back its own failure, so we assume there was a
  1773  			// problem with the onion and report that back. We reuse
  1774  			// the invalid onion key failure because there is no
  1775  			// specific error for this case.
  1776  			failure = &lnwire.FailInvalidOnionKey{
  1777  				OnionSHA256: msg.ShaOnionBlob,
  1778  			}
  1779  		}
  1780  
  1781  		// With the error parsed, we'll convert it into its opaque
  1782  		// form.
  1783  		var b bytes.Buffer
  1784  		if err := lnwire.EncodeFailure(&b, failure, 0); err != nil {
  1785  			l.log.Errorf("unable to encode malformed error: %v", err)
  1786  			return
  1787  		}
  1788  
  1789  		// If the remote side was unable to parse the onion blob we
  1790  		// sent it, then we should transform the malformed HTLC message
  1791  		// into the usual HTLC fail message.
  1792  		err := l.channel.ReceiveFailHTLC(msg.ID, b.Bytes())
  1793  		if err != nil {
  1794  			l.fail(LinkFailureError{code: ErrInvalidUpdate},
  1795  				"unable to handle upstream fail HTLC: %v", err)
  1796  			return
  1797  		}
  1798  
  1799  	case *lnwire.UpdateFailHTLC:
  1800  		idx := msg.ID
  1801  		err := l.channel.ReceiveFailHTLC(idx, msg.Reason[:])
  1802  		if err != nil {
  1803  			l.fail(LinkFailureError{code: ErrInvalidUpdate},
  1804  				"unable to handle upstream fail HTLC: %v", err)
  1805  			return
  1806  		}
  1807  
  1808  	case *lnwire.CommitSig:
  1809  		// Since we may have learned new preimages for the first time,
  1810  		// we'll add them to our preimage cache. By doing this, we
  1811  		// ensure any contested contracts watched by any on-chain
  1812  		// arbitrators can now sweep this HTLC on-chain. We delay
  1813  		// committing the preimages until just before accepting the new
  1814  		// remote commitment, as afterwards the peer won't resend the
  1815  		// Settle messages on the next channel reestablishment. Doing so
  1816  		// allows us to more effectively batch this operation, instead
  1817  		// of doing a single write per preimage.
  1818  		err := l.cfg.PreimageCache.AddPreimages(
  1819  			l.uncommittedPreimages...,
  1820  		)
  1821  		if err != nil {
  1822  			l.fail(
  1823  				LinkFailureError{code: ErrInternalError},
  1824  				"unable to add preimages=%v to cache: %v",
  1825  				l.uncommittedPreimages, err,
  1826  			)
  1827  			return
  1828  		}
  1829  
  1830  		// Instead of truncating the slice to conserve memory
  1831  		// allocations, we simply set the uncommitted preimage slice to
  1832  		// nil so that a new one will be initialized if any more
  1833  		// witnesses are discovered. We do this because the maximum size
  1834  		// of the slice can occupy 15KB, and we want to ensure we
  1835  		// release that memory back to the runtime.
  1836  		l.uncommittedPreimages = nil
  1837  
  1838  		// We just received a new update to our local commitment
  1839  		// chain; validate this new commitment, closing the link if
  1840  		// invalid.
  1841  		err = l.channel.ReceiveNewCommitment(msg.CommitSig, msg.HtlcSigs)
  1842  		if err != nil {
  1843  			// If we were unable to reconstruct their proposed
  1844  			// commitment, then we'll examine the type of error. If
  1845  			// it's an InvalidCommitSigError, then we'll send a
  1846  			// direct error.
  1847  			var sendData []byte
  1848  			switch err.(type) {
  1849  			case *lnwallet.InvalidCommitSigError:
  1850  				sendData = []byte(err.Error())
  1851  			case *lnwallet.InvalidHtlcSigError:
  1852  				sendData = []byte(err.Error())
  1853  			}
  1854  			l.fail(
  1855  				LinkFailureError{
  1856  					code:       ErrInvalidCommitment,
  1857  					ForceClose: true,
  1858  					SendData:   sendData,
  1859  				},
  1860  				"ChannelPoint(%v): unable to accept new "+
  1861  					"commitment: %v",
  1862  				l.channel.ChannelPoint(), err,
  1863  			)
  1864  			return
  1865  		}
  1866  
  1867  		// As we've just accepted a new state, we'll now
  1868  		// immediately send the remote peer a revocation for our prior
  1869  		// state.
  1870  		nextRevocation, currentHtlcs, err := l.channel.RevokeCurrentCommitment()
  1871  		if err != nil {
  1872  			l.log.Errorf("unable to revoke commitment: %v", err)
  1873  			return
  1874  		}
  1875  		l.cfg.Peer.SendMessage(false, nextRevocation)
  1876  
  1877  		// Since we just revoked our commitment, we may have a new set
  1878  		// of HTLC's on our commitment, so we'll send them over our
  1879  		// HTLC update channel so any callers can be notified.
  1880  		select {
  1881  		case l.htlcUpdates <- &contractcourt.ContractUpdate{
  1882  			HtlcKey: contractcourt.LocalHtlcSet,
  1883  			Htlcs:   currentHtlcs,
  1884  		}:
  1885  		case <-l.quit:
  1886  			return
  1887  		}
  1888  
  1889  		// If both commitment chains are fully synced from our PoV,
  1890  		// then we don't need to reply with a signature as both sides
  1891  		// already have a commitment with the latest accepted state.
  1892  		if !l.channel.OweCommitment(true) {
  1893  			return
  1894  		}
  1895  
  1896  		// Otherwise, the remote party initiated the state transition,
  1897  		// so we'll reply with a signature to provide them with their
  1898  		// version of the latest commitment.
  1899  		if !l.updateCommitTxOrFail() {
  1900  			return
  1901  		}
  1902  
  1903  	case *lnwire.RevokeAndAck:
  1904  		// We've received a revocation from the remote chain, if valid,
  1905  		// this moves the remote chain forward, and expands our
  1906  		// revocation window.
  1907  		fwdPkg, adds, settleFails, remoteHTLCs, err := l.channel.ReceiveRevocation(
  1908  			msg,
  1909  		)
  1910  		if err != nil {
  1911  			// TODO(halseth): force close?
  1912  			l.fail(LinkFailureError{code: ErrInvalidRevocation},
  1913  				"unable to accept revocation: %v", err)
  1914  			return
  1915  		}
  1916  
  1917  		// The remote party now has a new primary commitment, so we'll
  1918  		// update the contract court to be aware of this new set (the
  1919  		// prior old remote pending).
  1920  		select {
  1921  		case l.htlcUpdates <- &contractcourt.ContractUpdate{
  1922  			HtlcKey: contractcourt.RemoteHtlcSet,
  1923  			Htlcs:   remoteHTLCs,
  1924  		}:
  1925  		case <-l.quit:
  1926  			return
  1927  		}
  1928  
  1929  		// If we have a tower client, we'll back up this revoked state.
  1930  		if l.cfg.TowerClient != nil {
  1931  			state := l.channel.State()
  1932  			breachInfo, err := lnwallet.NewBreachRetribution(
  1933  				state, state.RemoteCommitment.CommitHeight-1, 0,
  1934  			)
  1935  			if err != nil {
  1936  				l.fail(LinkFailureError{code: ErrInternalError},
  1937  					"failed to load breach info: %v", err)
  1938  				return
  1939  			}
  1940  
  1941  			chanID := l.ChanID()
  1942  			err = l.cfg.TowerClient.BackupState(
  1943  				&chanID, breachInfo, state.ChanType,
  1944  			)
  1945  			if err != nil {
  1946  				l.fail(LinkFailureError{code: ErrInternalError},
  1947  					"unable to queue breach backup: %v", err)
  1948  				return
  1949  			}
  1950  		}
  1951  
  1952  		l.processRemoteSettleFails(fwdPkg, settleFails)
  1953  		l.processRemoteAdds(fwdPkg, adds)
  1954  
  1955  		// If the link failed while processing the adds, we must
  1956  		// return to ensure we don't attempt to update the state
  1957  		// further.
  1958  		if l.failed {
  1959  			return
  1960  		}
  1961  
  1962  		// The revocation window opened up. If there are pending local
  1963  		// updates, try to update the commit tx. Pending updates could
  1964  		// already have been present because of a previously failed
  1965  		// update to the commit tx or freshly added in by
  1966  		// processRemoteAdds. Also in case there are no local updates,
  1967  		// but there are still remote updates that are not in the remote
  1968  		// commit tx yet, send out an update.
  1969  		if l.channel.OweCommitment(true) {
  1970  			if !l.updateCommitTxOrFail() {
  1971  				return
  1972  			}
  1973  		}
  1974  
  1975  	case *lnwire.UpdateFee:
  1976  		// We received fee update from peer. If we are the initiator we
  1977  		// will fail the channel, if not we will apply the update.
  1978  		fee := chainfee.AtomPerKByte(msg.FeePerKB)
  1979  		if err := l.channel.ReceiveUpdateFee(fee); err != nil {
  1980  			l.fail(LinkFailureError{code: ErrInvalidUpdate},
  1981  				"error receiving fee update: %v", err)
  1982  			return
  1983  		}
  1984  
  1985  		// Update the mailbox's feerate as well.
  1986  		l.mailBox.SetFeeRate(fee)
  1987  
  1988  	case *lnwire.Error:
  1989  		// Error received from remote, MUST fail channel, but should
  1990  		// only print the contents of the error message if all
  1991  		// characters are printable ASCII.
  1992  		l.fail(
  1993  			LinkFailureError{
  1994  				code: ErrRemoteError,
  1995  
  1996  				// TODO(halseth): we currently don't fail the
  1997  				// channel permanently, as there are some sync
  1998  				// issues with other implementations that will
  1999  				// lead to them sending an error message, but
  2000  				// we can recover from on next connection. See
  2001  				// https://github.com/ElementsProject/lightning/issues/4212
  2002  				PermanentFailure: false,
  2003  			},
  2004  			"ChannelPoint(%v): received error from peer: %v",
  2005  			l.channel.ChannelPoint(), msg.Error(),
  2006  		)
  2007  	default:
  2008  		l.log.Warnf("received unknown message of type %T", msg)
  2009  	}
  2010  
  2011  }
  2012  
  2013  // ackDownStreamPackets is responsible for removing htlcs from a link's mailbox
  2014  // for packets delivered from the switch, and cleaning up any circuits closed by
  2015  // signing a previous commitment txn. This method ensures that the circuits are
  2016  // removed from the circuit map before removing them from the link's mailbox,
  2017  // otherwise it could be possible for some circuit to be missed if this link
  2018  // flaps.
  2019  func (l *channelLink) ackDownStreamPackets() error {
  2020  	// First, remove the downstream Add packets that were included in the
  2021  	// previous commitment signature. This will prevent the Adds from being
  2022  	// replayed if this link disconnects.
  2023  	for _, inKey := range l.openedCircuits {
  2024  		// In order to test the sphinx replay logic of the remote
  2025  		// party, unsafe replay does not acknowledge the packets from
  2026  		// the mailbox. We can then force a replay of any Add packets
  2027  		// held in memory by disconnecting and reconnecting the link.
  2028  		if l.cfg.UnsafeReplay {
  2029  			continue
  2030  		}
  2031  
  2032  		l.log.Debugf("removing Add packet %s from mailbox", inKey)
  2033  		l.mailBox.AckPacket(inKey)
  2034  	}
  2035  
  2036  	// Now, we will delete all circuits closed by the previous commitment
  2037  	// signature, which is the result of downstream Settle/Fail packets. We
  2038  	// batch them here to ensure circuits are closed atomically and for
  2039  	// performance.
  2040  	err := l.cfg.Circuits.DeleteCircuits(l.closedCircuits...)
  2041  	switch err {
  2042  	case nil:
  2043  		// Successful deletion.
  2044  
  2045  	default:
  2046  		l.log.Errorf("unable to delete %d circuits: %v",
  2047  			len(l.closedCircuits), err)
  2048  		return err
  2049  	}
  2050  
  2051  	// With the circuits removed from memory and disk, we now ack any
  2052  	// Settle/Fails in the mailbox to ensure they do not get redelivered
  2053  	// after startup. If forgive is enabled and we've reached this point,
  2054  	// the circuits must have been removed at some point, so it is now safe
  2055  	// to un-queue the corresponding Settle/Fails.
  2056  	for _, inKey := range l.closedCircuits {
  2057  		l.log.Debugf("removing Fail/Settle packet %s from mailbox",
  2058  			inKey)
  2059  		l.mailBox.AckPacket(inKey)
  2060  	}
  2061  
  2062  	// Lastly, reset our buffers to be empty while keeping any acquired
  2063  	// growth in the backing array.
  2064  	l.openedCircuits = l.openedCircuits[:0]
  2065  	l.closedCircuits = l.closedCircuits[:0]
  2066  
  2067  	return nil
  2068  }
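
// Note that the ordering above is deliberate: circuits are deleted from
// the persistent circuit map before the corresponding Settle/Fail packets
// are acked in the in-memory mailbox. If the link flaps between the two
// steps, the worst case is a redelivered packet whose circuit is already
// gone, which can be detected and dropped; acking first could instead
// lose the circuit deletion entirely.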
  2069  
  2070  // updateCommitTxOrFail updates the commitment tx and if that fails, it fails
  2071  // the link.
  2072  func (l *channelLink) updateCommitTxOrFail() bool {
  2073  	if err := l.updateCommitTx(); err != nil {
  2074  		l.fail(LinkFailureError{code: ErrInternalError},
  2075  			"unable to update commitment: %v", err)
  2076  		return false
  2077  	}
  2078  
  2079  	return true
  2080  }
  2081  
  2082  // updateCommitTx signs, then sends an update to the remote peer adding a new
  2083  // commitment to their commitment chain which includes all the latest updates
  2084  // we've received+processed up to this point.
  2085  func (l *channelLink) updateCommitTx() error {
  2086  	// Preemptively write all pending keystones to disk, just in case the
  2087  	// HTLCs we have in memory are included in the subsequent attempt to
  2088  	// sign a commitment state.
  2089  	err := l.cfg.Circuits.OpenCircuits(l.keystoneBatch...)
  2090  	if err != nil {
  2091  		return err
  2092  	}
  2093  
  2094  	// Reset the batch, but keep the backing buffer to avoid reallocating.
  2095  	l.keystoneBatch = l.keystoneBatch[:0]
  2096  
  2097  	// If hodl.Commit mode is active, we will refrain from attempting to
  2098  	// commit any in-memory modifications to the channel state. Exiting here
  2099  	// permits testing of either the switch or link's ability to trim
  2100  	// circuits that have been opened, but unsuccessfully committed.
  2101  	if l.cfg.HodlMask.Active(hodl.Commit) {
  2102  		l.log.Warnf(hodl.Commit.Warning())
  2103  		return nil
  2104  	}
  2105  
  2106  	theirCommitSig, htlcSigs, pendingHTLCs, err := l.channel.SignNextCommitment()
  2107  	if err == lnwallet.ErrNoWindow {
  2108  		l.cfg.PendingCommitTicker.Resume()
  2109  
  2110  		l.log.Tracef("revocation window exhausted, unable to send: "+
  2111  			"pend_updates=%v, dangling_opens=%v, dangling_closes=%v",
  2112  			l.channel.PendingLocalUpdateCount(),
  2113  			newLogClosure(func() string {
  2114  				return spew.Sdump(l.openedCircuits)
  2115  			}),
  2116  			newLogClosure(func() string {
  2117  				return spew.Sdump(l.closedCircuits)
  2118  			}),
  2119  		)
  2120  		return nil
  2121  	} else if err != nil {
  2122  		return err
  2123  	}
  2124  
  2125  	if err := l.ackDownStreamPackets(); err != nil {
  2126  		return err
  2127  	}
  2128  
  2129  	l.cfg.PendingCommitTicker.Pause()
  2130  
  2131  	// The remote party now has a new pending commitment, so we'll update
  2132  	// the contract court to be aware of this new set (the prior old remote
  2133  	// pending).
  2134  	select {
  2135  	case l.htlcUpdates <- &contractcourt.ContractUpdate{
  2136  		HtlcKey: contractcourt.RemotePendingHtlcSet,
  2137  		Htlcs:   pendingHTLCs,
  2138  	}:
  2139  	case <-l.quit:
  2140  		return ErrLinkShuttingDown
  2141  	}
  2142  
  2143  	commitSig := &lnwire.CommitSig{
  2144  		ChanID:    l.ChanID(),
  2145  		CommitSig: theirCommitSig,
  2146  		HtlcSigs:  htlcSigs,
  2147  	}
  2148  	l.cfg.Peer.SendMessage(false, commitSig)
  2149  
  2150  	return nil
  2151  }
  2152  
  2153  // Peer returns the representation of remote peer with which we have the
  2154  // channel link opened.
  2155  //
  2156  // NOTE: Part of the ChannelLink interface.
  2157  func (l *channelLink) Peer() lnpeer.Peer {
  2158  	return l.cfg.Peer
  2159  }
  2160  
  2161  // ChannelPoint returns the channel outpoint for the channel link.
  2162  // NOTE: Part of the ChannelLink interface.
  2163  func (l *channelLink) ChannelPoint() *wire.OutPoint {
  2164  	return l.channel.ChannelPoint()
  2165  }
  2166  
  2167  // ShortChanID returns the short channel ID for the channel link. The short
  2168  // channel ID encodes the exact location in the main chain that the original
  2169  // funding output can be found.
  2170  //
  2171  // NOTE: Part of the ChannelLink interface.
  2172  func (l *channelLink) ShortChanID() lnwire.ShortChannelID {
  2173  	l.RLock()
  2174  	defer l.RUnlock()
  2175  
  2176  	return l.shortChanID
  2177  }
  2178  
  2179  // UpdateShortChanID updates the short channel ID for a link. This may be
  2180  // required in the event that a link is created before the short chan ID for it
  2181  // is known, or a re-org occurs, and the funding transaction changes location
  2182  // within the chain.
  2183  //
  2184  // NOTE: Part of the ChannelLink interface.
  2185  func (l *channelLink) UpdateShortChanID() (lnwire.ShortChannelID, error) {
  2186  	chanID := l.ChanID()
  2187  
  2188  	// Refresh the channel state's short channel ID by loading it from disk.
  2189  	// This ensures that the channel state accurately reflects the updated
  2190  	// short channel ID.
  2191  	err := l.channel.State().RefreshShortChanID()
  2192  	if err != nil {
  2193  		l.log.Errorf("unable to refresh short_chan_id for chan_id=%v: "+
  2194  			"%v", chanID, err)
  2195  		return hop.Source, err
  2196  	}
  2197  
  2198  	sid := l.channel.ShortChanID()
  2199  
  2200  	l.log.Infof("updating to short_chan_id=%s for chan_id=%v", sid, chanID)
  2201  
  2202  	l.Lock()
  2203  	l.shortChanID = sid
  2204  	l.Unlock()
  2205  
  2206  	go func() {
  2207  		err := l.cfg.UpdateContractSignals(&contractcourt.ContractSignals{
  2208  			HtlcUpdates: l.htlcUpdates,
  2209  			ShortChanID: sid,
  2210  		})
  2211  		if err != nil {
  2212  			l.log.Errorf("unable to update signals: %v", err)
  2213  		}
  2214  	}()
  2215  
  2216  	// Now that the short channel ID has been properly updated, we can begin
  2217  	// garbage collecting any forwarding packages we create.
  2218  	l.wg.Add(1)
  2219  	go l.fwdPkgGarbager()
  2220  
  2221  	return sid, nil
  2222  }
  2223  
  2224  // ChanID returns the channel ID for the channel link. The channel ID is a more
  2225  // compact representation of a channel's full outpoint.
  2226  //
  2227  // NOTE: Part of the ChannelLink interface.
  2228  func (l *channelLink) ChanID() lnwire.ChannelID {
  2229  	return lnwire.NewChanIDFromOutPoint(l.channel.ChannelPoint())
  2230  }
  2231  
  2232  // Bandwidth returns the total amount that can flow through the channel link at
  2233  // this given instant. The value returned is expressed in MilliAtom and can
  2234  // be used by callers when making forwarding decisions to determine if a link
  2235  // can accept an HTLC.
  2236  //
  2237  // NOTE: Part of the ChannelLink interface.
  2238  func (l *channelLink) Bandwidth() lnwire.MilliAtom {
  2239  	// Get the balance available on the channel for new HTLCs. This takes
  2240  	// the channel reserve into account so HTLCs up to this value won't
  2241  	// violate it.
  2242  	return l.channel.AvailableBalance()
  2243  }
  2244  
  2245  // MayAddOutgoingHtlc indicates whether we can add an outgoing htlc with the
  2246  // amount provided to the link. This check does not reserve a space, since
  2247  // forwards or other payments may use the available slot, so it should be
  2248  // considered best-effort.
  2249  func (l *channelLink) MayAddOutgoingHtlc(amt lnwire.MilliAtom) error {
  2250  	return l.channel.MayAddOutgoingHtlc(amt)
  2251  }
  2252  
  2253  // getDustSum is a wrapper method that calls the underlying channel's dust sum
  2254  // method.
  2255  //
  2256  // NOTE: Part of the dustHandler interface.
  2257  func (l *channelLink) getDustSum(remote bool) lnwire.MilliAtom {
  2258  	return l.channel.GetDustSum(remote)
  2259  }
  2260  
  2261  // getFeeRate is a wrapper method that retrieves the underlying channel's
  2262  // feerate.
  2263  //
  2264  // NOTE: Part of the dustHandler interface.
  2265  func (l *channelLink) getFeeRate() chainfee.AtomPerKByte {
  2266  	return l.channel.CommitFeeRate()
  2267  }
  2268  
  2269  // getDustClosure returns a closure that can be used by the switch or mailbox
  2270  // to evaluate whether a given HTLC is dust.
  2271  //
  2272  // NOTE: Part of the dustHandler interface.
  2273  func (l *channelLink) getDustClosure() dustClosure {
  2274  	localDustLimit := l.channel.State().LocalChanCfg.DustLimit
  2275  	remoteDustLimit := l.channel.State().RemoteChanCfg.DustLimit
  2276  	chanType := l.channel.State().ChanType
  2277  
  2278  	return dustHelper(chanType, localDustLimit, remoteDustLimit)
  2279  }
  2280  
  2281  // dustClosure is a function that evaluates whether an HTLC is dust. It returns
  2282  // true if the HTLC is dust. It takes in a feerate, a boolean denoting whether
  2283  // the HTLC is incoming (i.e. one that the remote sent), a boolean denoting
  2284  // whether to evaluate on the local or remote commit, and finally an HTLC
  2285  // amount to test.
  2286  type dustClosure func(chainfee.AtomPerKByte, bool, bool, dcrutil.Amount) bool
  2287  
  2288  // dustHelper is used to construct the dustClosure.
  2289  func dustHelper(chantype channeldb.ChannelType, localDustLimit,
  2290  	remoteDustLimit dcrutil.Amount) dustClosure {
  2291  
  2292  	isDust := func(feerate chainfee.AtomPerKByte, incoming,
  2293  		localCommit bool, amt dcrutil.Amount) bool {
  2294  
  2295  		if localCommit {
  2296  			return lnwallet.HtlcIsDust(
  2297  				chantype, incoming, true, feerate, amt,
  2298  				localDustLimit,
  2299  			)
  2300  		}
  2301  
  2302  		return lnwallet.HtlcIsDust(
  2303  			chantype, incoming, false, feerate, amt,
  2304  			remoteDustLimit,
  2305  		)
  2306  	}
  2307  
  2308  	return isDust
  2309  }
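
// A hedged usage sketch of the closure above: a caller such as the
// switch or mailbox captures the channel's dust limits once via
// getDustClosure and can then classify arbitrary HTLCs without touching
// channel state again. The fee rate and amount below are illustrative
// values only.
func exampleDustCheck(isDust dustClosure) bool {
	feeRate := chainfee.AtomPerKByte(10000)
	amt := dcrutil.Amount(5000)

	// Evaluate an incoming HTLC against our local commitment.
	return isDust(feeRate, true, true, amt)
}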
  2310  
  2311  // AttachMailBox updates the current mailbox used by this link, and hooks up
  2312  // the mailbox's message and packet outboxes to the link's upstream and
  2313  // downstream chans, respectively.
  2314  func (l *channelLink) AttachMailBox(mailbox MailBox) {
  2315  	l.Lock()
  2316  	l.mailBox = mailbox
  2317  	l.upstream = mailbox.MessageOutBox()
  2318  	l.downstream = mailbox.PacketOutBox()
  2319  	l.Unlock()
  2320  
  2321  	// Set the mailbox's fee rate. This may be refreshing a feerate that was
  2322  	// never committed.
  2323  	l.mailBox.SetFeeRate(l.getFeeRate())
  2324  
  2325  	// Also set the mailbox's dust closure so that it can query whether HTLC's
  2326  	// are dust given the current feerate.
  2327  	l.mailBox.SetDustClosure(l.getDustClosure())
  2328  }
  2329  
  2330  // UpdateForwardingPolicy updates the forwarding policy for the target
  2331  // ChannelLink. Once updated, the link will use the new forwarding policy to
  2332  // govern whether an incoming HTLC should be forwarded or not. We assume that
  2333  // fields that are zero are intentionally set to zero, so we'll use newPolicy
  2334  // to update all of the link's FwrdingPolicy values.
  2335  //
  2336  // NOTE: Part of the ChannelLink interface.
  2337  func (l *channelLink) UpdateForwardingPolicy(newPolicy ForwardingPolicy) {
  2338  	l.Lock()
  2339  	defer l.Unlock()
  2340  
  2341  	l.cfg.FwrdingPolicy = newPolicy
  2342  }
  2343  
  2344  // CheckHtlcForward should return a nil error if the passed HTLC details
  2345  // satisfy the current forwarding policy of the target link. Otherwise,
  2346  // a LinkError with a valid protocol failure message should be returned
  2347  // in order to signal to the source of the HTLC, the policy consistency
  2348  // issue.
  2349  //
  2350  // NOTE: Part of the ChannelLink interface.
  2351  func (l *channelLink) CheckHtlcForward(payHash [32]byte,
  2352  	incomingHtlcAmt, amtToForward lnwire.MilliAtom,
  2353  	incomingTimeout, outgoingTimeout uint32,
  2354  	heightNow uint32) *LinkError {
  2355  
  2356  	l.RLock()
  2357  	policy := l.cfg.FwrdingPolicy
  2358  	l.RUnlock()
  2359  
  2360  	// First check whether the outgoing htlc satisfies the channel policy.
  2361  	err := l.canSendHtlc(
  2362  		policy, payHash, amtToForward, outgoingTimeout, heightNow,
  2363  	)
  2364  	if err != nil {
  2365  		return err
  2366  	}
  2367  
  2368  	// Next, using the amount of the incoming HTLC, we'll calculate the
  2369  	// expected fee this incoming HTLC must carry in order to satisfy the
  2370  	// constraints of the outgoing link.
  2371  	expectedFee := ExpectedFee(policy, amtToForward)
  2372  
  2373  	// If the actual fee is less than our expected fee, then we'll reject
  2374  	// this HTLC as it didn't provide a sufficient amount of fees, or the
  2375  	// values have been tampered with, or the send used incorrect/dated
  2376  	// information to construct the forwarding information for this hop. In
  2377  	// any case, we'll cancel this HTLC.
  2378  	actualFee := incomingHtlcAmt - amtToForward
  2379  	if incomingHtlcAmt < amtToForward || actualFee < expectedFee {
  2380  		l.log.Warnf("outgoing htlc(%x) has insufficient fee: "+
  2381  			"expected %v, got %v",
  2382  			payHash[:], int64(expectedFee), int64(actualFee))
  2383  
  2384  		// As part of the returned error, we'll send our latest routing
  2385  		// policy so the sending node obtains the most up to date data.
  2386  		failure := l.createFailureWithUpdate(
  2387  			func(upd *lnwire.ChannelUpdate) lnwire.FailureMessage {
  2388  				return lnwire.NewFeeInsufficient(
  2389  					amtToForward, *upd,
  2390  				)
  2391  			},
  2392  		)
  2393  		return NewLinkError(failure)
  2394  	}
  2395  
  2396  	// Finally, we'll ensure that the time-lock on the outgoing HTLC meets
  2397  	// the following constraint: the incoming time-lock minus our time-lock
  2398  	// delta should equal the outgoing time lock. Otherwise, either the
  2399  	// sender messed up or an intermediate node tampered with the HTLC.
  2400  	timeDelta := policy.TimeLockDelta
  2401  	if incomingTimeout < outgoingTimeout+timeDelta {
  2402  		l.log.Warnf("incoming htlc(%x) has incorrect time-lock value: "+
  2403  			"expected at least %v block delta, got %v block delta",
  2404  			payHash[:], timeDelta, incomingTimeout-outgoingTimeout)
  2405  
  2406  		// Grab the latest routing policy so the sending node is up to
  2407  		// date with our current policy.
  2408  		failure := l.createFailureWithUpdate(
  2409  			func(upd *lnwire.ChannelUpdate) lnwire.FailureMessage {
  2410  				return lnwire.NewIncorrectCltvExpiry(
  2411  					incomingTimeout, *upd,
  2412  				)
  2413  			},
  2414  		)
  2415  		return NewLinkError(failure)
  2416  	}
  2417  
  2418  	return nil
  2419  }
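
// A minimal numeric sketch of the fee check above, with illustrative
// values only: receiving 1,000,500 MAt while forwarding 1,000,000 MAt
// yields an actual fee of 500 MAt, which must be at least the expected
// fee for the HTLC to be forwarded (exampleFeeCheck is hypothetical and
// not part of this package's API).
func exampleFeeCheck(incomingAmt, amtToForward,
	expectedFee lnwire.MilliAtom) bool {

	// Guard against underflow before computing the fee, mirroring the
	// incomingHtlcAmt < amtToForward check above.
	if incomingAmt < amtToForward {
		return false
	}
	return incomingAmt-amtToForward >= expectedFee
}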
  2420  
  2421  // CheckHtlcTransit should return a nil error if the passed HTLC details
  2422  // satisfy the current channel policy.  Otherwise, a LinkError with a valid
  2423  // protocol failure message should be returned in order to signal the
  2424  // violation. This call is intended to be used for locally initiated payments
  2425  // for which there is no corresponding incoming htlc.
  2426  func (l *channelLink) CheckHtlcTransit(payHash [32]byte,
  2427  	amt lnwire.MilliAtom, timeout uint32,
  2428  	heightNow uint32) *LinkError {
  2429  
  2430  	l.RLock()
  2431  	policy := l.cfg.FwrdingPolicy
  2432  	l.RUnlock()
  2433  
  2434  	return l.canSendHtlc(
  2435  		policy, payHash, amt, timeout, heightNow,
  2436  	)
  2437  }
  2438  
  2439  // canSendHtlc checks whether the given htlc parameters satisfy
  2440  // the channel's amount and time lock constraints.
  2441  func (l *channelLink) canSendHtlc(policy ForwardingPolicy,
  2442  	payHash [32]byte, amt lnwire.MilliAtom, timeout uint32,
  2443  	heightNow uint32) *LinkError {
  2444  
  2445  	// As our first sanity check, we'll ensure that the passed HTLC isn't
  2446  	// too small for the next hop. If so, then we'll cancel the HTLC
  2447  	// directly.
  2448  	if amt < policy.MinHTLCOut {
  2449  		l.log.Warnf("outgoing htlc(%x) is too small: min_htlc=%v, "+
  2450  			"htlc_value=%v", payHash[:], policy.MinHTLCOut,
  2451  			amt)
  2452  
  2453  		// As part of the returned error, we'll send our latest routing
  2454  		// policy so the sending node obtains the most up to date data.
  2455  		failure := l.createFailureWithUpdate(
  2456  			func(upd *lnwire.ChannelUpdate) lnwire.FailureMessage {
  2457  				return lnwire.NewAmountBelowMinimum(
  2458  					amt, *upd,
  2459  				)
  2460  			},
  2461  		)
  2462  		return NewLinkError(failure)
  2463  	}
  2464  
  2465  	// Next, ensure that the passed HTLC isn't too large. If so, we'll
  2466  	// cancel the HTLC directly.
  2467  	if policy.MaxHTLC != 0 && amt > policy.MaxHTLC {
  2468  		l.log.Warnf("outgoing htlc(%x) is too large: max_htlc=%v, "+
  2469  			"htlc_value=%v", payHash[:], policy.MaxHTLC, amt)
  2470  
  2471  		// As part of the returned error, we'll send our latest routing
  2472  		// policy so the sending node obtains the most up-to-date data.
  2473  		failure := l.createFailureWithUpdate(
  2474  			func(upd *lnwire.ChannelUpdate) lnwire.FailureMessage {
  2475  				return lnwire.NewTemporaryChannelFailure(upd)
  2476  			},
  2477  		)
  2478  		return NewDetailedLinkError(failure, OutgoingFailureHTLCExceedsMax)
  2479  	}
  2480  
  2481  	// We want to avoid offering an HTLC which will expire in the near
  2482  	// future, so we'll reject an HTLC if the outgoing expiration time is
  2483  	// too close to the current height.
  2484  	if timeout <= heightNow+l.cfg.OutgoingCltvRejectDelta {
  2485  		l.log.Warnf("htlc(%x) has an expiry that's too soon: "+
  2486  			"outgoing_expiry=%v, best_height=%v", payHash[:],
  2487  			timeout, heightNow)
  2488  		failure := l.createFailureWithUpdate(
  2489  			func(upd *lnwire.ChannelUpdate) lnwire.FailureMessage {
  2490  				return lnwire.NewExpiryTooSoon(*upd)
  2491  			},
  2492  		)
  2493  		return NewLinkError(failure)
  2494  	}
  2495  
  2496  	// Check absolute max delta.
  2497  	if timeout > l.cfg.MaxOutgoingCltvExpiry+heightNow {
  2498  		l.log.Warnf("outgoing htlc(%x) has a time lock too far in "+
  2499  			"the future: got %v, but maximum is %v", payHash[:],
  2500  			timeout-heightNow, l.cfg.MaxOutgoingCltvExpiry)
  2501  
  2502  		return NewLinkError(&lnwire.FailExpiryTooFar{})
  2503  	}
  2504  
  2505  	// Check to see if there is enough balance in this channel.
  2506  	if amt > l.Bandwidth() {
  2507  		l.log.Warnf("insufficient bandwidth to route htlc: %v is "+
  2508  			"larger than %v", amt, l.Bandwidth())
  2509  		failure := l.createFailureWithUpdate(
  2510  			func(upd *lnwire.ChannelUpdate) lnwire.FailureMessage {
  2511  				return lnwire.NewTemporaryChannelFailure(upd)
  2512  			},
  2513  		)
  2514  		return NewDetailedLinkError(
  2515  			failure, OutgoingFailureInsufficientBalance,
  2516  		)
  2517  	}
  2518  
  2519  	return nil
  2520  }
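
// Taken together, the two expiry checks above confine the outgoing
// timeout to a window relative to the current height: it must be beyond
// heightNow+OutgoingCltvRejectDelta but no further out than
// heightNow+MaxOutgoingCltvExpiry. A hedged sketch of that window test
// with hypothetical parameter names:
func exampleExpiryWindow(timeout, heightNow, rejectDelta,
	maxExpiry uint32) bool {

	// Too soon: the HTLC would expire before we could safely claim it.
	if timeout <= heightNow+rejectDelta {
		return false
	}

	// Too far: our funds would be locked up for an unreasonable time.
	if timeout > heightNow+maxExpiry {
		return false
	}

	return true
}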
  2521  
  2522  // Stats returns the statistics of channel link.
  2523  //
  2524  // NOTE: Part of the ChannelLink interface.
  2525  func (l *channelLink) Stats() (uint64, lnwire.MilliAtom, lnwire.MilliAtom) {
  2526  	snapshot := l.channel.StateSnapshot()
  2527  
  2528  	return snapshot.ChannelCommitment.CommitHeight,
  2529  		snapshot.TotalMAtomsSent,
  2530  		snapshot.TotalMAtomsReceived
  2531  }
  2532  
  2533  // String returns the string representation of channel link.
  2534  //
  2535  // NOTE: Part of the ChannelLink interface.
  2536  func (l *channelLink) String() string {
  2537  	return l.channel.ChannelPoint().String()
  2538  }
  2539  
  2540  // handleSwitchPacket handles switch packets. These packets might be forwarded
  2541  // to us from another channel link in case the htlc update came from another
  2542  // peer, or if the update was created by the user.
  2543  //
  2544  // NOTE: Part of the packetHandler interface.
  2545  func (l *channelLink) handleSwitchPacket(pkt *htlcPacket) error {
  2546  	l.log.Tracef("received switch packet inkey=%v, outkey=%v",
  2547  		pkt.inKey(), pkt.outKey())
  2548  
  2549  	return l.mailBox.AddPacket(pkt)
  2550  }
  2551  
  2552  // handleLocalAddPacket handles a locally-initiated UpdateAddHTLC packet. It
  2553  // will be processed synchronously.
  2554  //
  2555  // NOTE: Part of the packetHandler interface.
  2556  func (l *channelLink) handleLocalAddPacket(pkt *htlcPacket) error {
  2557  	l.log.Tracef("received switch packet outkey=%v", pkt.outKey())
  2558  
  2559  	// Create a buffered result channel to prevent the link from blocking.
  2560  	errChan := make(chan error, 1)
  2561  
  2562  	select {
  2563  	case l.localUpdateAdd <- &localUpdateAddMsg{
  2564  		pkt: pkt,
  2565  		err: errChan,
  2566  	}:
  2567  	case <-l.quit:
  2568  		return ErrLinkShuttingDown
  2569  	}
  2570  
  2571  	select {
  2572  	case err := <-errChan:
  2573  		return err
  2574  	case <-l.quit:
  2575  		return ErrLinkShuttingDown
  2576  	}
  2577  }
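
// The two selects above form a common Go request/reply pattern: the
// request carries its own buffered reply channel, and both the send and
// the receive race against the quit channel so that neither side can
// block shutdown. A minimal sketch of the pattern; requestReply and its
// parameters are hypothetical names used for illustration only.
func requestReply(requests chan<- chan error,
	quit <-chan struct{}) error {

	// Buffered so the handler can reply without blocking, even if we
	// have already given up and returned.
	reply := make(chan error, 1)

	select {
	case requests <- reply:
	case <-quit:
		return ErrLinkShuttingDown
	}

	select {
	case err := <-reply:
		return err
	case <-quit:
		return ErrLinkShuttingDown
	}
}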
  2578  
  2579  // HandleChannelUpdate handles htlc requests (settle/add/fail) sent to us from
  2580  // the remote peer we have a channel with.
  2581  //
  2582  // NOTE: Part of the ChannelLink interface.
  2583  func (l *channelLink) HandleChannelUpdate(message lnwire.Message) {
  2584  	l.mailBox.AddMessage(message)
  2585  }
  2586  
  2587  // ShutdownIfChannelClean triggers a link shutdown if the channel is in a clean
  2588  // state and errors if the channel has lingering updates.
  2589  //
  2590  // NOTE: Part of the ChannelUpdateHandler interface.
  2591  func (l *channelLink) ShutdownIfChannelClean() error {
  2592  	errChan := make(chan error, 1)
  2593  
  2594  	select {
  2595  	case l.shutdownRequest <- &shutdownReq{
  2596  		err: errChan,
  2597  	}:
  2598  	case <-l.quit:
  2599  		return ErrLinkShuttingDown
  2600  	}
  2601  
  2602  	select {
  2603  	case err := <-errChan:
  2604  		return err
  2605  	case <-l.quit:
  2606  		return ErrLinkShuttingDown
  2607  	}
  2608  }
  2609  
  2610  // updateChannelFee updates the commitment fee-per-kB on this channel by
  2611  // committing to an update_fee message.
  2612  func (l *channelLink) updateChannelFee(feePerKB chainfee.AtomPerKByte) error {
  2613  
  2614  	l.log.Infof("updating commit fee to %s", feePerKB)
  2615  
  2616  	// We skip sending the UpdateFee message if the channel is not
  2617  	// currently eligible to forward messages.
  2618  	if !l.EligibleToForward() {
  2619  		l.log.Debugf("skipping fee update for inactive channel")
  2620  		return nil
  2621  	}
  2622  
  2623  	// First, we'll update the local fee on our commitment.
  2624  	if err := l.channel.UpdateFee(feePerKB); err != nil {
  2625  		return err
  2626  	}
  2627  
  2628  	// The fee passed the channel's validation checks, so we update the
  2629  	// mailbox feerate.
  2630  	l.mailBox.SetFeeRate(feePerKB)
  2631  
  2632  	// We'll then attempt to send a new UpdateFee message, and also lock it
  2633  	// in immediately by triggering a commitment update.
  2634  	msg := lnwire.NewUpdateFee(l.ChanID(), uint32(feePerKB))
  2635  	if err := l.cfg.Peer.SendMessage(false, msg); err != nil {
  2636  		return err
  2637  	}
  2638  	return l.updateCommitTx()
  2639  }
  2640  
  2641  // processRemoteSettleFails accepts a batch of settle/fail payment descriptors
  2642  // after receiving a revocation from the remote party, and reprocesses them in
  2643  // the context of the provided forwarding package. Any settles or fails that
  2644  // have already been acknowledged in the forwarding package will not be sent to
  2645  // the switch.
  2646  func (l *channelLink) processRemoteSettleFails(fwdPkg *channeldb.FwdPkg,
  2647  	settleFails []*lnwallet.PaymentDescriptor) {
  2648  
  2649  	if len(settleFails) == 0 {
  2650  		return
  2651  	}
  2652  
  2653  	l.log.Debugf("settle-fail-filter %v", fwdPkg.SettleFailFilter)
  2654  
  2655  	var switchPackets []*htlcPacket
  2656  	for i, pd := range settleFails {
  2657  		// Skip any settles or fails that have already been
  2658  		// acknowledged by the incoming link that originated the
  2659  		// forwarded Add.
  2660  		if fwdPkg.SettleFailFilter.Contains(uint16(i)) {
  2661  			continue
  2662  		}
  2663  
  2664  		// TODO(roasbeef): rework log entries to a shared
  2665  		// interface.
  2666  
  2667  		switch pd.EntryType {
  2668  
  2669  		// A settle for an HTLC we previously forwarded has been
  2670  		// received, so we'll forward it to the switch, which will
  2671  		// handle propagating the settle to the prior hop.
  2672  		case lnwallet.Settle:
  2673  			// If hodl.SettleIncoming is requested, we will not
  2674  			// forward the SETTLE to the switch and will not signal
  2675  			// a free slot on the commitment transaction.
  2676  			if l.cfg.HodlMask.Active(hodl.SettleIncoming) {
  2677  				l.log.Warnf(hodl.SettleIncoming.Warning())
  2678  				continue
  2679  			}
  2680  
  2681  			settlePacket := &htlcPacket{
  2682  				outgoingChanID: l.ShortChanID(),
  2683  				outgoingHTLCID: pd.ParentIndex,
  2684  				destRef:        pd.DestRef,
  2685  				htlc: &lnwire.UpdateFulfillHTLC{
  2686  					PaymentPreimage: pd.RPreimage,
  2687  				},
  2688  			}
  2689  
  2690  			// Add the packet to the batch to be forwarded, and
  2691  			// notify the overflow queue that a spare spot has been
  2692  			// freed up within the commitment state.
  2693  			switchPackets = append(switchPackets, settlePacket)
  2694  
  2695  		// A fail message for a previously forwarded HTLC has
  2696  		// been received. As a result, a new slot will be freed up in
  2697  		// our commitment state, so we'll forward this to the switch so
  2698  		// the backwards undo can continue.
  2699  		case lnwallet.Fail:
  2700  			// If hodl.FailIncoming is requested, we will not
  2701  			// forward the FAIL to the switch and will not signal a
  2702  			// free slot on the commitment transaction.
  2703  			if l.cfg.HodlMask.Active(hodl.FailIncoming) {
  2704  				l.log.Warnf(hodl.FailIncoming.Warning())
  2705  				continue
  2706  			}
  2707  
  2708  			// Fetch the reason the HTLC was canceled so we can
  2709  			// continue to propagate it. This failure originated
  2710  			// from another node, so the linkFailure field is not
  2711  			// set on the packet.
  2712  			failPacket := &htlcPacket{
  2713  				outgoingChanID: l.ShortChanID(),
  2714  				outgoingHTLCID: pd.ParentIndex,
  2715  				destRef:        pd.DestRef,
  2716  				htlc: &lnwire.UpdateFailHTLC{
  2717  					Reason: lnwire.OpaqueReason(
  2718  						pd.FailReason,
  2719  					),
  2720  				},
  2721  			}
  2722  
  2723  			l.log.Debugf("Failed to send %s", pd.Amount)
  2724  
  2725  			// If the failure message lacks an HMAC (but includes
  2726  			// the 4 bytes for encoding the message and padding
  2727  			// lengths), then this means that we received it as an
  2728  			// UpdateFailMalformedHTLC. As a result, we'll signal
  2729  			// that we need to convert this error within the switch
  2730  			// to an actual error, by encrypting it as if we were
  2731  			// the originating hop.
  2732  			convertedErrorSize := lnwire.FailureMessageLength + 4
  2733  			if len(pd.FailReason) == convertedErrorSize {
  2734  				failPacket.convertedError = true
  2735  			}
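
			// For scale: lnwire.FailureMessageLength is 256
			// bytes, so a converted reason is exactly 260 bytes,
			// i.e. two uint16 length prefixes plus the message
			// and padding, with no 32-byte HMAC in front.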
  2736  
  2737  			// Add the packet to the batch to be forwarded, and
  2738  			// notify the overflow queue that a spare spot has been
  2739  			// freed up within the commitment state.
  2740  			switchPackets = append(switchPackets, failPacket)
  2741  		}
  2742  	}
  2743  
  2744  	// Only spawn the task to forward packets if we have a non-zero number.
  2745  	if len(switchPackets) > 0 {
  2746  		go l.forwardBatch(switchPackets...)
  2747  	}
  2748  }
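
// The SettleFailFilter consulted above is what makes reprocessing after a
// restart idempotent: any index whose settle/fail has already been handed
// to the switch is skipped the second time around. A compressed sketch of
// that filtering pattern, with acked standing in for the filter's
// Contains check (illustrative only):
//
//	func unackedIndexes(n int, acked func(uint16) bool) []uint16 {
//		var todo []uint16
//		for i := 0; i < n; i++ {
//			// Skip responses the incoming link has already
//			// acknowledged.
//			if acked(uint16(i)) {
//				continue
//			}
//			todo = append(todo, uint16(i))
//		}
//		return todo
//	}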
  2749  
  2750  // processRemoteAdds serially processes each of the Add payment descriptors
  2751  // which have been "locked-in" by receiving a revocation from the remote party.
  2752  // The forwarding package provided instructs how to process this batch,
  2753  // indicating whether this is the first time these Adds are being processed, or
  2754  // whether we are reprocessing as a result of a failure or restart. Adds that
  2755  // have already been acknowledged in the forwarding package will be ignored.
  2756  func (l *channelLink) processRemoteAdds(fwdPkg *channeldb.FwdPkg,
  2757  	lockedInHtlcs []*lnwallet.PaymentDescriptor) {
  2758  
  2759  	l.log.Tracef("processing %d remote adds for height %d",
  2760  		len(lockedInHtlcs), fwdPkg.Height)
  2761  
  2762  	decodeReqs := make(
  2763  		[]hop.DecodeHopIteratorRequest, 0, len(lockedInHtlcs),
  2764  	)
  2765  	for _, pd := range lockedInHtlcs {
  2766  		switch pd.EntryType {
  2767  
  2768  		// TODO(conner): remove type switch?
  2769  		case lnwallet.Add:
  2770  			// Before adding the new htlc to the state machine,
  2771  			// parse the onion object in order to obtain the
  2772  			// routing information with the DecodeHopIterator
  2773  			// function, which processes the Sphinx packet.
  2774  			onionReader := bytes.NewReader(pd.OnionBlob)
  2775  
  2776  			req := hop.DecodeHopIteratorRequest{
  2777  				OnionReader:  onionReader,
  2778  				RHash:        pd.RHash[:],
  2779  				IncomingCltv: pd.Timeout,
  2780  			}
  2781  
  2782  			decodeReqs = append(decodeReqs, req)
  2783  		}
  2784  	}
  2785  
  2786  	// Atomically decode the incoming htlcs, simultaneously checking for
  2787  	// replay attempts. A particular index in the returned, spare list of
  2788  	// channel iterators should only be used if the failure code at the
  2789  	// same index is lnwire.FailCodeNone.
  2790  	decodeResps, sphinxErr := l.cfg.DecodeHopIterators(
  2791  		fwdPkg.ID(), decodeReqs,
  2792  	)
  2793  	if sphinxErr != nil {
  2794  		l.fail(LinkFailureError{code: ErrInternalError},
  2795  			"unable to decode hop iterators: %v", sphinxErr)
  2796  		return
  2797  	}
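
	// Note that the responses above pair one-to-one with decodeReqs by
	// index: decodeResps[i].Result() yields either a usable hop iterator
	// or a failure code for lockedInHtlcs[i]. Decoding the batch under
	// fwdPkg.ID() lets the replay protection treat the forwarding
	// package as a single unit across restarts.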
  2798  
  2799  	var switchPackets []*htlcPacket
  2800  
  2801  	for i, pd := range lockedInHtlcs {
  2802  		idx := uint16(i)
  2803  
  2804  		if fwdPkg.State == channeldb.FwdStateProcessed &&
  2805  			fwdPkg.AckFilter.Contains(idx) {
  2806  
  2807  			// If this index is already found in the ack filter,
  2808  			// the response to this forwarding decision has already
  2809  			// been committed by one of our commitment txns. ADDs
  2810  			// in this state are waiting for the rest of the fwding
  2811  			// package to get acked before being garbage collected.
  2812  			continue
  2813  		}
  2814  
  2815  		// An incoming HTLC add has been fully locked in. As a result,
  2816  		// we can now examine the forwarding details of the HTLC, and
  2817  		// the HTLC itself, to decide whether we should forward it,
  2818  		// cancel it, or are able to settle it (and whether it adheres
  2819  		// to our fee related constraints).
  2820  
  2821  		// Fetch the onion blob that was included within this processed
  2822  		// payment descriptor.
  2823  		var onionBlob [lnwire.OnionPacketSize]byte
  2824  		copy(onionBlob[:], pd.OnionBlob)
  2825  
  2826  		// Before adding the new htlc to the state machine, parse the
  2827  		// onion object in order to obtain the routing information with
  2828  		// the DecodeHopIterator function, which processes the Sphinx packet.
  2829  		chanIterator, failureCode := decodeResps[i].Result()
  2830  		if failureCode != lnwire.CodeNone {
  2831  			// If we're unable to process the onion blob, then we
  2832  			// should send the malformed htlc error to the payment
  2833  			// sender.
  2834  			l.sendMalformedHTLCError(pd.HtlcIndex, failureCode,
  2835  				onionBlob[:], pd.SourceRef)
  2836  
  2837  			l.log.Errorf("unable to decode onion hop "+
  2838  				"iterator: %v", failureCode)
  2839  			continue
  2840  		}
  2841  
  2842  		// Retrieve the onion obfuscator from the onion blob in order
  2843  		// to produce the initial obfuscation of the onion failure code.
  2844  		obfuscator, failureCode := chanIterator.ExtractErrorEncrypter(
  2845  			l.cfg.ExtractErrorEncrypter,
  2846  		)
  2847  		if failureCode != lnwire.CodeNone {
  2848  			// If we're unable to process the onion blob, then we
  2849  			// should send the malformed htlc error to the payment
  2850  			// sender.
  2851  			l.sendMalformedHTLCError(
  2852  				pd.HtlcIndex, failureCode, onionBlob[:], pd.SourceRef,
  2853  			)
  2854  
  2855  			l.log.Errorf("unable to decode onion "+
  2856  				"obfuscator: %v", failureCode)
  2857  			continue
  2858  		}
  2859  
  2860  		heightNow := l.cfg.BestHeight()
  2861  
  2862  		pld, err := chanIterator.HopPayload()
  2863  		if err != nil {
  2864  			// If we're unable to process the onion payload, or we
  2865  			// received an invalid onion payload failure, then we
  2866  			// should send an error back to the caller so the HTLC
  2867  			// can be canceled.
  2868  			var failedType uint64
  2869  			if e, ok := err.(hop.ErrInvalidPayload); ok {
  2870  				failedType = uint64(e.Type)
  2871  			}
  2872  
  2873  			// TODO: currently none of the unit test infrastructure
  2874  			// is set up to handle TLV payloads, so testing this
  2875  			// would require implementing a separate mock iterator
  2876  			// for TLV payloads that also supports injecting invalid
  2877  			// payloads. Deferring this non-trivial effort till a
  2878  			// later date.
  2879  			failure := lnwire.NewInvalidOnionPayload(failedType, 0)
  2880  			l.sendHTLCError(
  2881  				pd, NewLinkError(failure), obfuscator, false,
  2882  			)
  2883  
  2884  			l.log.Errorf("unable to decode forwarding "+
  2885  				"instructions: %v", err)
  2886  			continue
  2887  		}
  2888  
  2889  		fwdInfo := pld.ForwardingInfo()
  2890  
  2891  		switch fwdInfo.NextHop {
  2892  		case hop.Exit:
  2893  			err := l.processExitHop(
  2894  				pd, obfuscator, fwdInfo, heightNow, pld,
  2895  			)
  2896  			if err != nil {
  2897  				l.fail(LinkFailureError{code: ErrInternalError},
  2898  					err.Error(),
  2899  				)
  2900  
  2901  				return
  2902  			}
  2903  
  2904  		// There are additional channels left within this route. So
  2905  		// we'll simply do some forwarding package book-keeping.
  2906  		default:
  2907  			// If hodl.AddIncoming is requested, we will not
  2908  			// validate the forwarded ADD, nor will we send the
  2909  			// packet to the htlc switch.
  2910  			if l.cfg.HodlMask.Active(hodl.AddIncoming) {
  2911  				l.log.Warnf(hodl.AddIncoming.Warning())
  2912  				continue
  2913  			}
  2914  
  2915  			switch fwdPkg.State {
  2916  			case channeldb.FwdStateProcessed:
  2917  				// This add was not forwarded on the previous
  2918  				// processing phase, run it through our
  2919  				// validation pipeline to reproduce an error.
  2920  				// This may trigger a different error due to
  2921  				// expiring timelocks, but we expect that an
  2922  				// error will be reproduced.
  2923  				if !fwdPkg.FwdFilter.Contains(idx) {
  2924  					break
  2925  				}
  2926  
  2927  				// Otherwise, it was already processed, so we
  2928  				// can collect it and continue.
  2929  				addMsg := &lnwire.UpdateAddHTLC{
  2930  					Expiry:      fwdInfo.OutgoingCTLV,
  2931  					Amount:      fwdInfo.AmountToForward,
  2932  					PaymentHash: pd.RHash,
  2933  				}
  2934  
  2935  				// Finally, we'll encode the onion packet for
  2936  				// the _next_ hop using the hop iterator
  2937  				// decoded for the current hop.
  2938  				buf := bytes.NewBuffer(addMsg.OnionBlob[0:0])
  2939  
  2940  				// We know this cannot fail, as this ADD
  2941  				// was marked forwarded in a previous
  2942  				// round of processing.
  2943  				chanIterator.EncodeNextHop(buf)
  2944  
  2945  				updatePacket := &htlcPacket{
  2946  					incomingChanID:  l.ShortChanID(),
  2947  					incomingHTLCID:  pd.HtlcIndex,
  2948  					outgoingChanID:  fwdInfo.NextHop,
  2949  					sourceRef:       pd.SourceRef,
  2950  					incomingAmount:  pd.Amount,
  2951  					amount:          addMsg.Amount,
  2952  					htlc:            addMsg,
  2953  					obfuscator:      obfuscator,
  2954  					incomingTimeout: pd.Timeout,
  2955  					outgoingTimeout: fwdInfo.OutgoingCTLV,
  2956  					customRecords:   pld.CustomRecords(),
  2957  				}
  2958  				switchPackets = append(
  2959  					switchPackets, updatePacket,
  2960  				)
  2961  
  2962  				continue
  2963  			}
  2964  
  2965  			// TODO(roasbeef): ensure don't accept outrageous
  2966  			// timeout for htlc
  2967  
  2968  			// With all our forwarding constraints met, we'll
  2969  			// create the outgoing HTLC using the parameters as
  2970  			// specified in the forwarding info.
  2971  			addMsg := &lnwire.UpdateAddHTLC{
  2972  				Expiry:      fwdInfo.OutgoingCTLV,
  2973  				Amount:      fwdInfo.AmountToForward,
  2974  				PaymentHash: pd.RHash,
  2975  			}
  2976  
  2977  			// Finally, we'll encode the onion packet for the
  2978  			// _next_ hop using the hop iterator decoded for the
  2979  			// current hop.
  2980  			buf := bytes.NewBuffer(addMsg.OnionBlob[0:0])
  2981  			err := chanIterator.EncodeNextHop(buf)
  2982  			if err != nil {
  2983  				l.log.Errorf("unable to encode the "+
  2984  					"remaining route %v", err)
  2985  
  2986  				failure := l.createFailureWithUpdate(
  2987  					func(upd *lnwire.ChannelUpdate) lnwire.FailureMessage {
  2988  						return lnwire.NewTemporaryChannelFailure(
  2989  							upd,
  2990  						)
  2991  					},
  2992  				)
  2993  
  2994  				l.sendHTLCError(
  2995  					pd, NewLinkError(failure), obfuscator, false,
  2996  				)
  2997  				continue
  2998  			}
  2999  
  3000  			// Now that this add has been reprocessed, only append
  3001  			// it to our list of packets to forward to the switch
  3002  			// if this is the first time processing the add. If the
  3003  			// fwd pkg has already been processed, then we entered
  3004  			// the above section to recreate a previous error. If
  3005  			// the packet had previously been forwarded, it would
  3006  			// have been added to switchPackets at the top of this
  3007  			// section.
  3008  			if fwdPkg.State == channeldb.FwdStateLockedIn {
  3009  				updatePacket := &htlcPacket{
  3010  					incomingChanID:  l.ShortChanID(),
  3011  					incomingHTLCID:  pd.HtlcIndex,
  3012  					outgoingChanID:  fwdInfo.NextHop,
  3013  					sourceRef:       pd.SourceRef,
  3014  					incomingAmount:  pd.Amount,
  3015  					amount:          addMsg.Amount,
  3016  					htlc:            addMsg,
  3017  					obfuscator:      obfuscator,
  3018  					incomingTimeout: pd.Timeout,
  3019  					outgoingTimeout: fwdInfo.OutgoingCTLV,
  3020  					customRecords:   pld.CustomRecords(),
  3021  				}
  3022  
  3023  				fwdPkg.FwdFilter.Set(idx)
  3024  				switchPackets = append(switchPackets,
  3025  					updatePacket)
  3026  			}
  3027  		}
  3028  	}
  3029  
  3030  	// Commit the htlcs we are intending to forward if this package has not
  3031  	// been fully processed.
  3032  	if fwdPkg.State == channeldb.FwdStateLockedIn {
  3033  		err := l.channel.SetFwdFilter(fwdPkg.Height, fwdPkg.FwdFilter)
  3034  		if err != nil {
  3035  			l.fail(LinkFailureError{code: ErrInternalError},
  3036  				"unable to set fwd filter: %v", err)
  3037  			return
  3038  		}
  3039  	}
  3040  
  3041  	if len(switchPackets) == 0 {
  3042  		return
  3043  	}
  3044  
  3045  	l.log.Debugf("forwarding %d packets to switch", len(switchPackets))
  3046  
  3047  	// NOTE: This call is made synchronous so that we ensure all circuits
  3048  	// are committed in the exact order that they are processed in the link.
  3049  	// Failing to do this could cause reorderings/gaps in the range of
  3050  	// opened circuits, which violates assumptions made by the circuit
  3051  	// trimming.
  3052  	l.forwardBatch(switchPackets...)
  3053  }
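
// The tail of processRemoteAdds is effectively a write-ahead step: the
// forwarding decisions are persisted via SetFwdFilter before any packet
// reaches the switch, so a crash between the two steps replays the same
// decisions instead of making new ones. A compressed sketch of the
// pattern, with persist and forward standing in for the calls above
// (illustrative, not this package's API):
//
//	func commitThenForward(persist, forward func() error) error {
//		// 1. Durably record which adds we chose to forward.
//		if err := persist(); err != nil {
//			return err
//		}
//		// 2. Only then hand the packets to the switch.
//		return forward()
//	}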
  3054  
  3055  // processExitHop handles an htlc for which this link is the exit hop. It
  3056  // returns an error if the htlc could not be processed, in which case the
  3056  // caller fails the link.
  3057  func (l *channelLink) processExitHop(pd *lnwallet.PaymentDescriptor,
  3058  	obfuscator hop.ErrorEncrypter, fwdInfo hop.ForwardingInfo,
  3059  	heightNow uint32, payload invoices.Payload) error {
  3060  
  3061  	// If hodl.ExitSettle is requested, we will not validate the final hop's
  3062  	// ADD, nor will we settle the corresponding invoice or respond with the
  3063  	// preimage.
  3064  	if l.cfg.HodlMask.Active(hodl.ExitSettle) {
  3065  		l.log.Warnf(hodl.ExitSettle.Warning())
  3066  
  3067  		return nil
  3068  	}
  3069  
  3070  	// As we're the exit hop, we'll double check the hop-payload included in
  3071  	// the HTLC to ensure that it was crafted correctly by the sender and
  3072  	// matches the HTLC we were extended.
  3073  	if pd.Amount != fwdInfo.AmountToForward {
  3074  
  3075  		l.log.Errorf("onion payload of incoming htlc(%x) has incorrect "+
  3076  			"value: expected %v, got %v", pd.RHash,
  3077  			pd.Amount, fwdInfo.AmountToForward)
  3078  
  3079  		failure := NewLinkError(
  3080  			lnwire.NewFinalIncorrectHtlcAmount(pd.Amount),
  3081  		)
  3082  		l.sendHTLCError(pd, failure, obfuscator, true)
  3083  
  3084  		return nil
  3085  	}
  3086  
  3087  	// We'll also ensure that our time-lock value has been computed
  3088  	// correctly.
  3089  	if pd.Timeout != fwdInfo.OutgoingCTLV {
  3090  		l.log.Errorf("onion payload of incoming htlc(%x) has incorrect "+
  3091  			"time-lock: expected %v, got %v",
  3092  			pd.RHash[:], pd.Timeout, fwdInfo.OutgoingCTLV)
  3093  
  3094  		failure := NewLinkError(
  3095  			lnwire.NewFinalIncorrectCltvExpiry(pd.Timeout),
  3096  		)
  3097  		l.sendHTLCError(pd, failure, obfuscator, true)
  3098  
  3099  		return nil
  3100  	}
  3101  
  3102  	// Notify the invoiceRegistry of the exit hop htlc. If we crash right
  3103  	// after this, this code will be re-executed after restart. We will
  3104  	// receive back a resolution event.
  3105  	invoiceHash := lntypes.Hash(pd.RHash)
  3106  
  3107  	circuitKey := channeldb.CircuitKey{
  3108  		ChanID: l.ShortChanID(),
  3109  		HtlcID: pd.HtlcIndex,
  3110  	}
  3111  
  3112  	event, err := l.cfg.Registry.NotifyExitHopHtlc(
  3113  		invoiceHash, pd.Amount, pd.Timeout, int32(heightNow),
  3114  		circuitKey, l.hodlQueue.ChanIn(), payload,
  3115  	)
  3116  	if err != nil {
  3117  		return err
  3118  	}
  3119  
  3120  	// Create a hodlHtlc struct and decide whether to resolve it now or later.
  3121  	htlc := hodlHtlc{
  3122  		pd:         pd,
  3123  		obfuscator: obfuscator,
  3124  	}
  3125  
  3126  	// If the event is nil, the invoice is being held, so we save the
  3127  	// payment descriptor for future reference.
  3128  	if event == nil {
  3129  		l.hodlMap[circuitKey] = htlc
  3130  		return nil
  3131  	}
  3132  
  3133  	// Process the received resolution.
  3134  	return l.processHtlcResolution(event, htlc)
  3135  }
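
// A nil event from NotifyExitHopHtlc is the hodl-invoice case: the
// resolution arrives later on hodlQueue.ChanIn(), keyed by the same
// CircuitKey stored in hodlMap above. A compressed sketch of how a stored
// entry is consumed once a resolution for circuit key ck arrives (the
// actual handling lives in the link's main loop):
//
//	if htlc, ok := l.hodlMap[ck]; ok {
//		delete(l.hodlMap, ck)
//		err = l.processHtlcResolution(resolution, htlc)
//	}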
  3136  
  3137  // settleHTLC settles the HTLC on the channel.
  3138  func (l *channelLink) settleHTLC(preimage lntypes.Preimage,
  3139  	pd *lnwallet.PaymentDescriptor) error {
  3140  
  3141  	hash := preimage.Hash()
  3142  
  3143  	l.log.Infof("settling htlc %v as exit hop", hash)
  3144  
  3145  	err := l.channel.SettleHTLC(
  3146  		preimage, pd.HtlcIndex, pd.SourceRef, nil, nil,
  3147  	)
  3148  	if err != nil {
  3149  		return fmt.Errorf("unable to settle htlc: %v", err)
  3150  	}
  3151  
  3152  	// If the link is in hodl.BogusSettle mode, replace the preimage with a
  3153  	// fake one before sending it to the peer.
  3154  	if l.cfg.HodlMask.Active(hodl.BogusSettle) {
  3155  		l.log.Warnf(hodl.BogusSettle.Warning())
  3156  		preimage = [32]byte{}
  3157  		copy(preimage[:], bytes.Repeat([]byte{2}, 32))
  3158  	}
  3159  
  3160  	// The HTLC was successfully settled locally, so send a notification
  3161  	// about it to the remote peer.
  3162  	l.cfg.Peer.SendMessage(false, &lnwire.UpdateFulfillHTLC{
  3163  		ChanID:          l.ChanID(),
  3164  		ID:              pd.HtlcIndex,
  3165  		PaymentPreimage: preimage,
  3166  	})
  3167  
  3168  	// Once we have successfully settled the htlc, notify a settle event.
  3169  	l.cfg.HtlcNotifier.NotifySettleEvent(
  3170  		HtlcKey{
  3171  			IncomingCircuit: channeldb.CircuitKey{
  3172  				ChanID: l.ShortChanID(),
  3173  				HtlcID: pd.HtlcIndex,
  3174  			},
  3175  		},
  3176  		preimage,
  3177  		HtlcEventTypeReceive,
  3178  	)
  3179  
  3180  	return nil
  3181  }
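
// Note that hodl.BogusSettle substitutes a preimage of 32 repeated 0x02
// bytes, which exists purely to exercise failure paths in tests:
//
//	var fake lntypes.Preimage
//	copy(fake[:], bytes.Repeat([]byte{2}, 32))
//	// fake.Hash() will not match the invoice's payment hash, so the
//	// remote peer will reject the fulfill.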
  3182  
  3183  // forwardBatch forwards the given htlcPackets to the switch, and waits on the
  3184  // err chan for the individual responses. This method is intended to be spawned
  3185  // as a goroutine so the responses can be handled in the background.
  3186  func (l *channelLink) forwardBatch(packets ...*htlcPacket) {
  3187  	// Don't forward packets for which we already have a response in our
  3188  	// mailbox. This could happen if a packet fails and is buffered in the
  3189  	// mailbox, and the incoming link flaps.
  3190  	var filteredPkts = make([]*htlcPacket, 0, len(packets))
  3191  	for _, pkt := range packets {
  3192  		if l.mailBox.HasPacket(pkt.inKey()) {
  3193  			continue
  3194  		}
  3195  
  3196  		filteredPkts = append(filteredPkts, pkt)
  3197  	}
  3198  
  3199  	if err := l.cfg.ForwardPackets(l.quit, filteredPkts...); err != nil {
  3200  		log.Errorf("Unhandled error while reforwarding htlc "+
  3201  			"settle/fail over htlcswitch: %v", err)
  3202  	}
  3203  }
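
// Note that forwardBatch is used in both modes seen above: spawned as a
// goroutine by processRemoteSettleFails, where response ordering doesn't
// matter, and called synchronously by processRemoteAdds, where circuits
// must be committed in processing order.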
  3204  
  3205  // sendHTLCError cancels the HTLC and sends a cancel message back to the
  3206  // peer from which the HTLC was received.
  3207  func (l *channelLink) sendHTLCError(pd *lnwallet.PaymentDescriptor,
  3208  	failure *LinkError, e hop.ErrorEncrypter, isReceive bool) {
  3209  
  3210  	reason, err := e.EncryptFirstHop(failure.WireMessage())
  3211  	if err != nil {
  3212  		l.log.Errorf("unable to obfuscate error: %v", err)
  3213  		return
  3214  	}
  3215  
  3216  	err = l.channel.FailHTLC(pd.HtlcIndex, reason, pd.SourceRef, nil, nil)
  3217  	if err != nil {
  3218  		l.log.Errorf("unable cancel htlc: %v", err)
  3219  		return
  3220  	}
  3221  
  3222  	l.cfg.Peer.SendMessage(false, &lnwire.UpdateFailHTLC{
  3223  		ChanID: l.ChanID(),
  3224  		ID:     pd.HtlcIndex,
  3225  		Reason: reason,
  3226  	})
  3227  
  3228  	// Notify a link failure on our incoming link. Outgoing htlc information
  3229  	// is not available at this point, because we have not decrypted the
  3230  	// onion, so it is excluded.
  3231  	var eventType HtlcEventType
  3232  	if isReceive {
  3233  		eventType = HtlcEventTypeReceive
  3234  	} else {
  3235  		eventType = HtlcEventTypeForward
  3236  	}
  3237  
  3238  	l.cfg.HtlcNotifier.NotifyLinkFailEvent(
  3239  		HtlcKey{
  3240  			IncomingCircuit: channeldb.CircuitKey{
  3241  				ChanID: l.ShortChanID(),
  3242  				HtlcID: pd.HtlcIndex,
  3243  			},
  3244  		},
  3245  		HtlcInfo{
  3246  			IncomingTimeLock: pd.Timeout,
  3247  			IncomingAmt:      pd.Amount,
  3248  		},
  3249  		eventType,
  3250  		failure,
  3251  		true,
  3252  	)
  3253  }
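
// EncryptFirstHop is used above because this link is where the failure
// originates; each hop further upstream then re-wraps the opaque reason
// (via the encrypter's IntermediateEncrypt) so that only the original
// sender can attribute the failure to a specific hop.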
  3254  
  3255  // sendMalformedHTLCError is a helper function which sends the malformed
  3256  // HTLC update to the payment sender.
  3257  func (l *channelLink) sendMalformedHTLCError(htlcIndex uint64,
  3258  	code lnwire.FailCode, onionBlob []byte, sourceRef *channeldb.AddRef) {
  3259  
  3260  	shaOnionBlob := sha256.Sum256(onionBlob)
  3261  	err := l.channel.MalformedFailHTLC(htlcIndex, code, shaOnionBlob, sourceRef)
  3262  	if err != nil {
  3263  		l.log.Errorf("unable cancel htlc: %v", err)
  3264  		return
  3265  	}
  3266  
  3267  	l.cfg.Peer.SendMessage(false, &lnwire.UpdateFailMalformedHTLC{
  3268  		ChanID:       l.ChanID(),
  3269  		ID:           htlcIndex,
  3270  		ShaOnionBlob: shaOnionBlob,
  3271  		FailureCode:  code,
  3272  	})
  3273  }
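
// Because the onion could not be decrypted here, no encrypted reason can
// be produced; update_fail_malformed_htlc instead carries
// sha256(onion_blob) so the upstream node can verify the complaint
// matches the packet it actually sent.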
  3274  
  3275  // fail is a function which is used to encapsulate the action necessary for
  3276  // properly failing the link. It takes a LinkFailureError, which will be passed
  3277  // to the OnChannelFailure closure, in order for it to determine if we should
  3278  // force close the channel, and if we should send an error message to the
  3279  // remote peer.
  3280  func (l *channelLink) fail(linkErr LinkFailureError,
  3281  	format string, a ...interface{}) {
  3282  	reason := errors.Errorf(format, a...)
  3283  
  3284  	// Return if we have already notified about a failure.
  3285  	if l.failed {
  3286  		l.log.Warnf("ignoring link failure (%v), as link already "+
  3287  			"failed", reason)
  3288  		return
  3289  	}
  3290  
  3291  	l.log.Errorf("failing link: %s with error: %v", reason, linkErr)
  3292  
  3293  	// Set failed, such that we won't process any more updates, and notify
  3294  	// the peer about the failure.
  3295  	l.failed = true
  3296  	l.cfg.OnChannelFailure(l.ChanID(), l.ShortChanID(), linkErr)
  3297  }