github.com/decred/dcrlnd@v0.7.6/contractcourt/channel_arbitrator.go (about)

     1  package contractcourt
     2  
     3  import (
     4  	"bytes"
     5  	"errors"
     6  	"fmt"
     7  	"math"
     8  	"sync"
     9  	"sync/atomic"
    10  	"time"
    11  
    12  	"github.com/davecgh/go-spew/spew"
    13  	"github.com/decred/dcrd/chaincfg/chainhash"
    14  	"github.com/decred/dcrd/dcrutil/v4"
    15  	"github.com/decred/dcrd/wire"
    16  	"github.com/decred/dcrlnd/channeldb"
    17  	"github.com/decred/dcrlnd/input"
    18  	"github.com/decred/dcrlnd/kvdb"
    19  	"github.com/decred/dcrlnd/labels"
    20  	"github.com/decred/dcrlnd/lntypes"
    21  	"github.com/decred/dcrlnd/lnwallet"
    22  	"github.com/decred/dcrlnd/lnwire"
    23  	"github.com/decred/dcrlnd/sweep"
    24  )
    25  
    26  var (
    27  	// errAlreadyForceClosed is an error returned when we attempt to force
    28  	// close a channel that's already in the process of doing so.
    29  	errAlreadyForceClosed = errors.New("channel is already in the " +
    30  		"process of being force closed")
    31  )
    32  
    33  const (
    34  	// anchorSweepConfTarget is the conf target used when sweeping
    35  	// commitment anchors. This value is only used when the commitment
    36  	// transaction has no valid HTLCs for determining a confirmation
    37  	// deadline.
    38  	anchorSweepConfTarget = 144
    39  
    40  	// arbitratorBlockBufferSize is the size of the buffer we give to each
    41  	// channel arbitrator.
    42  	arbitratorBlockBufferSize = 20
    43  )
    44  
    45  // WitnessSubscription represents an intent to be notified once new witnesses
    46  // are discovered by various active contract resolvers. A contract resolver may
    47  // use this to be notified of when it can satisfy an incoming contract after we
    48  // discover the witness for an outgoing contract.
    49  type WitnessSubscription struct {
    50  	// WitnessUpdates is a channel that newly discovered witnesses will be
    51  	// sent over.
    52  	//
    53  	// TODO(roasbeef): couple with WitnessType?
    54  	WitnessUpdates <-chan lntypes.Preimage
    55  
    56  	// CancelSubscription is a function closure that should be used by a
    57  	// client to cancel the subscription once they are no longer interested
    58  	// in receiving new updates.
    59  	CancelSubscription func()
    60  }
    61  
    62  // WitnessBeacon is a global beacon of witnesses. Contract resolvers will use
    63  // this interface to lookup witnesses (preimages typically) of contracts
    64  // they're trying to resolve, add new preimages they resolve, and finally
    65  // receive new updates each new time a preimage is discovered.
    66  //
    67  // TODO(roasbeef): need to delete the pre-images once we've used them
    68  // and have been sufficiently confirmed?
    69  type WitnessBeacon interface {
    70  	// SubscribeUpdates returns a channel that will be sent upon *each* time
    71  	// a new preimage is discovered.
    72  	SubscribeUpdates() *WitnessSubscription
    73  
    74  	// LookupPreImage attempts to lookup a preimage in the global cache.
    75  	// True is returned for the second argument if the preimage is found.
    76  	LookupPreimage(payhash lntypes.Hash) (lntypes.Preimage, bool)
    77  
    78  	// AddPreimages adds a batch of newly discovered preimages to the global
    79  	// cache, and also signals any subscribers of the newly discovered
    80  	// witness.
    81  	AddPreimages(preimages ...lntypes.Preimage) error
    82  }
    83  
    84  // ArbChannel is an abstraction that allows the channel arbitrator to interact
    85  // with an open channel.
    86  type ArbChannel interface {
    87  	// ForceCloseChan should force close the contract that this attendant
    88  	// is watching over. We'll use this when we decide that we need to go
    89  	// to chain. It should in addition tell the switch to remove the
    90  	// corresponding link, such that we won't accept any new updates. The
    91  	// returned summary contains all items needed to eventually resolve all
    92  	// outputs on chain.
    93  	ForceCloseChan() (*lnwallet.LocalForceCloseSummary, error)
    94  
    95  	// NewAnchorResolutions returns the anchor resolutions for currently
    96  	// valid commitment transactions.
    97  	NewAnchorResolutions() (*lnwallet.AnchorResolutions, error)
    98  }
    99  
   100  // ChannelArbitratorConfig contains all the functionality that the
   101  // ChannelArbitrator needs in order to properly arbitrate any contract dispute
   102  // on chain.
   103  type ChannelArbitratorConfig struct {
   104  	// ChanPoint is the channel point that uniquely identifies this
   105  	// channel.
   106  	ChanPoint wire.OutPoint
   107  
   108  	// Channel is the full channel data structure. For legacy channels, this
   109  	// field may not always be set after a restart.
   110  	Channel ArbChannel
   111  
   112  	// ShortChanID describes the exact location of the channel within the
   113  	// chain. We'll use this to address any messages that we need to send
   114  	// to the switch during contract resolution.
   115  	ShortChanID lnwire.ShortChannelID
   116  
   117  	// ChainEvents is an active subscription to the chain watcher for this
   118  	// channel to be notified of any on-chain activity related to this
   119  	// channel.
   120  	ChainEvents *ChainEventSubscription
   121  
   122  	// MarkCommitmentBroadcasted should mark the channel as the commitment
   123  	// being broadcast, and we are waiting for the commitment to confirm.
   124  	MarkCommitmentBroadcasted func(*wire.MsgTx, bool) error
   125  
   126  	// MarkChannelClosed marks the channel closed in the database, with the
   127  	// passed close summary. After this method successfully returns we can
   128  	// no longer expect to receive chain events for this channel, and must
   129  	// be able to recover from a failure without getting the close event
   130  	// again. It takes an optional channel status which will update the
   131  	// channel status in the record that we keep of historical channels.
   132  	MarkChannelClosed func(*channeldb.ChannelCloseSummary,
   133  		...channeldb.ChannelStatus) error
   134  
   135  	// IsPendingClose is a boolean indicating whether the channel is marked
   136  	// as pending close in the database.
   137  	IsPendingClose bool
   138  
   139  	// ClosingHeight is the height at which the channel was closed. Note
   140  	// that this value is only valid if IsPendingClose is true.
   141  	ClosingHeight uint32
   142  
   143  	// CloseType is the type of the close event in case IsPendingClose is
   144  	// true. Otherwise this value is unset.
   145  	CloseType channeldb.ClosureType
   146  
   147  	// MarkChannelResolved is a function closure that serves to mark a
   148  	// channel as "fully resolved". A channel itself can be considered
   149  	// fully resolved once all active contracts have individually been
   150  	// fully resolved.
   151  	//
   152  	// TODO(roasbeef): need RPC's to combine for pendingchannels RPC
   153  	MarkChannelResolved func() error
   154  
   155  	// PutResolverReport records a resolver report for the channel. If the
   156  	// transaction provided is nil, the function should write the report
   157  	// in a new transaction.
   158  	PutResolverReport func(tx kvdb.RwTx,
   159  		report *channeldb.ResolverReport) error
   160  
   161  	// FetchHistoricalChannel retrieves the historical state of a channel.
   162  	// This is mostly used to supplement the ContractResolvers with
   163  	// additional information required for proper contract resolution.
   164  	FetchHistoricalChannel func() (*channeldb.OpenChannel, error)
   165  
   166  	ChainArbitratorConfig
   167  }
   168  
   169  // ReportOutputType describes the type of output that is being reported
   170  // on.
   171  type ReportOutputType uint8
   172  
   173  const (
   174  	// ReportOutputIncomingHtlc is an incoming hash time locked contract on
   175  	// the commitment tx.
   176  	ReportOutputIncomingHtlc ReportOutputType = iota
   177  
   178  	// ReportOutputOutgoingHtlc is an outgoing hash time locked contract on
   179  	// the commitment tx.
   180  	ReportOutputOutgoingHtlc
   181  
   182  	// ReportOutputUnencumbered is an uncontested output on the commitment
   183  	// transaction paying to us directly.
   184  	ReportOutputUnencumbered
   185  
   186  	// ReportOutputAnchor is an anchor output on the commitment tx.
   187  	ReportOutputAnchor
   188  )
   189  
   190  // ContractReport provides a summary of a commitment tx output.
   191  type ContractReport struct {
   192  	// Outpoint is the final output that will be swept back to the wallet.
   193  	Outpoint wire.OutPoint
   194  
   195  	// Type indicates the type of the reported output.
   196  	Type ReportOutputType
   197  
   198  	// Amount is the final value that will be swept in back to the wallet.
   199  	Amount dcrutil.Amount
   200  
   201  	// MaturityHeight is the absolute block height that this output will
   202  	// mature at.
   203  	MaturityHeight uint32
   204  
   205  	// Stage indicates whether the htlc is in the CLTV-timeout stage (1) or
   206  	// the CSV-delay stage (2). A stage 1 htlc's maturity height will be set
   207  	// to its expiry height, while a stage 2 htlc's maturity height will be
   208  	// set to its confirmation height plus the maturity requirement.
   209  	Stage uint32
   210  
   211  	// LimboBalance is the total number of frozen coins within this
   212  	// contract.
   213  	LimboBalance dcrutil.Amount
   214  
   215  	// RecoveredBalance is the total value that has been successfully swept
   216  	// back to the user's wallet.
   217  	RecoveredBalance dcrutil.Amount
   218  }
   219  
   220  // resolverReport creates a resolve report using some of the information in the
   221  // contract report.
   222  func (c *ContractReport) resolverReport(spendTx *chainhash.Hash,
   223  	resolverType channeldb.ResolverType,
   224  	outcome channeldb.ResolverOutcome) *channeldb.ResolverReport {
   225  
   226  	return &channeldb.ResolverReport{
   227  		OutPoint:        c.Outpoint,
   228  		Amount:          c.Amount,
   229  		ResolverType:    resolverType,
   230  		ResolverOutcome: outcome,
   231  		SpendTxID:       spendTx,
   232  	}
   233  }
   234  
   235  // htlcSet represents the set of active HTLCs on a given commitment
   236  // transaction.
   237  type htlcSet struct {
   238  	// incomingHTLCs is a map of all incoming HTLCs on the target
   239  	// commitment transaction. We may potentially go onchain to claim the
   240  	// funds sent to us within this set.
   241  	incomingHTLCs map[uint64]channeldb.HTLC
   242  
   243  	// outgoingHTLCs is a map of all outgoing HTLCs on the target
   244  	// commitment transaction. We may potentially go onchain to reclaim the
   245  	// funds that are currently in limbo.
   246  	outgoingHTLCs map[uint64]channeldb.HTLC
   247  }
   248  
   249  // newHtlcSet constructs a new HTLC set from a slice of HTLC's.
   250  func newHtlcSet(htlcs []channeldb.HTLC) htlcSet {
   251  	outHTLCs := make(map[uint64]channeldb.HTLC)
   252  	inHTLCs := make(map[uint64]channeldb.HTLC)
   253  	for _, htlc := range htlcs {
   254  		if htlc.Incoming {
   255  			inHTLCs[htlc.HtlcIndex] = htlc
   256  			continue
   257  		}
   258  
   259  		outHTLCs[htlc.HtlcIndex] = htlc
   260  	}
   261  
   262  	return htlcSet{
   263  		incomingHTLCs: inHTLCs,
   264  		outgoingHTLCs: outHTLCs,
   265  	}
   266  }
   267  
   268  // HtlcSetKey is a two-tuple that uniquely identifies a set of HTLCs on a
   269  // commitment transaction.
   270  type HtlcSetKey struct {
   271  	// IsRemote denotes if the HTLCs are on the remote commitment
   272  	// transaction.
   273  	IsRemote bool
   274  
   275  	// IsPending denotes if the commitment transaction that HTLCS are on
   276  	// are pending (the higher of two unrevoked commitments).
   277  	IsPending bool
   278  }
   279  
   280  var (
   281  	// LocalHtlcSet is the HtlcSetKey used for local commitments.
   282  	LocalHtlcSet = HtlcSetKey{IsRemote: false, IsPending: false}
   283  
   284  	// RemoteHtlcSet is the HtlcSetKey used for remote commitments.
   285  	RemoteHtlcSet = HtlcSetKey{IsRemote: true, IsPending: false}
   286  
   287  	// RemotePendingHtlcSet is the HtlcSetKey used for dangling remote
   288  	// commitment transactions.
   289  	RemotePendingHtlcSet = HtlcSetKey{IsRemote: true, IsPending: true}
   290  )
   291  
   292  // String returns a human readable string describing the target HtlcSetKey.
   293  func (h HtlcSetKey) String() string {
   294  	switch h {
   295  	case LocalHtlcSet:
   296  		return "LocalHtlcSet"
   297  	case RemoteHtlcSet:
   298  		return "RemoteHtlcSet"
   299  	case RemotePendingHtlcSet:
   300  		return "RemotePendingHtlcSet"
   301  	default:
   302  		return "unknown HtlcSetKey"
   303  	}
   304  }
   305  
   306  // ChannelArbitrator is the on-chain arbitrator for a particular channel. The
   307  // struct will keep in sync with the current set of HTLCs on the commitment
   308  // transaction. The job of the attendant is to go on-chain to either settle or
   309  // cancel an HTLC as necessary iff: an HTLC times out, or we known the
   310  // pre-image to an HTLC, but it wasn't settled by the link off-chain. The
   311  // ChannelArbitrator will factor in an expected confirmation delta when
   312  // broadcasting to ensure that we avoid any possibility of race conditions, and
   313  // sweep the output(s) without contest.
   314  type ChannelArbitrator struct {
   315  	started int32 // To be used atomically.
   316  	stopped int32 // To be used atomically.
   317  
   318  	// startTimestamp is the time when this ChannelArbitrator was started.
   319  	startTimestamp time.Time
   320  
   321  	// log is a persistent log that the attendant will use to checkpoint
   322  	// its next action, and the state of any unresolved contracts.
   323  	log ArbitratorLog
   324  
   325  	// activeHTLCs is the set of active incoming/outgoing HTLC's on all
   326  	// currently valid commitment transactions.
   327  	activeHTLCs map[HtlcSetKey]htlcSet
   328  
   329  	// cfg contains all the functionality that the ChannelArbitrator requires
   330  	// to do its duty.
   331  	cfg ChannelArbitratorConfig
   332  
   333  	// blocks is a channel that the arbitrator will receive new blocks on.
   334  	// This channel should be buffered by so that it does not block the
   335  	// sender.
   336  	blocks chan int32
   337  
   338  	// signalUpdates is a channel that any new live signals for the channel
   339  	// we're watching over will be sent.
   340  	signalUpdates chan *signalUpdateMsg
   341  
   342  	// htlcUpdates is a channel that is sent upon with new updates from the
   343  	// active channel. Each time a new commitment state is accepted, the
   344  	// set of HTLC's on the new state should be sent across this channel.
   345  	htlcUpdates <-chan *ContractUpdate
   346  
   347  	// activeResolvers is a slice of any active resolvers. This is used to
   348  	// be able to signal them for shutdown in the case that we shutdown.
   349  	activeResolvers []ContractResolver
   350  
   351  	// activeResolversLock prevents simultaneous read and write to the
   352  	// resolvers slice.
   353  	activeResolversLock sync.RWMutex
   354  
   355  	// resolutionSignal is a channel that will be sent upon by contract
   356  	// resolvers once their contract has been fully resolved. With each
   357  	// send, we'll check to see if the contract is fully resolved.
   358  	resolutionSignal chan struct{}
   359  
   360  	// forceCloseReqs is a channel that requests to forcibly close the
   361  	// contract will be sent over.
   362  	forceCloseReqs chan *forceCloseReq
   363  
   364  	// state is the current state of the arbitrator. This state is examined
   365  	// upon start up to decide which actions to take.
   366  	state ArbitratorState
   367  
   368  	wg   sync.WaitGroup
   369  	quit chan struct{}
   370  }
   371  
   372  // NewChannelArbitrator returns a new instance of a ChannelArbitrator backed by
   373  // the passed config struct.
   374  func NewChannelArbitrator(cfg ChannelArbitratorConfig,
   375  	htlcSets map[HtlcSetKey]htlcSet, log ArbitratorLog) *ChannelArbitrator {
   376  
   377  	return &ChannelArbitrator{
   378  		log:              log,
   379  		blocks:           make(chan int32, arbitratorBlockBufferSize),
   380  		signalUpdates:    make(chan *signalUpdateMsg),
   381  		htlcUpdates:      make(<-chan *ContractUpdate),
   382  		resolutionSignal: make(chan struct{}),
   383  		forceCloseReqs:   make(chan *forceCloseReq),
   384  		activeHTLCs:      htlcSets,
   385  		cfg:              cfg,
   386  		quit:             make(chan struct{}),
   387  	}
   388  }
   389  
   390  // chanArbStartState contains the information from disk that we need to start
   391  // up a channel arbitrator.
   392  type chanArbStartState struct {
   393  	currentState ArbitratorState
   394  	commitSet    *CommitSet
   395  }
   396  
   397  // getStartState retrieves the information from disk that our channel arbitrator
   398  // requires to start.
   399  func (c *ChannelArbitrator) getStartState(tx kvdb.RTx) (*chanArbStartState,
   400  	error) {
   401  
   402  	// First, we'll read our last state from disk, so our internal state
   403  	// machine can act accordingly.
   404  	state, err := c.log.CurrentState(tx)
   405  	if err != nil {
   406  		return nil, err
   407  	}
   408  
   409  	// Next we'll fetch our confirmed commitment set. This will only exist
   410  	// if the channel has been closed out on chain for modern nodes. For
   411  	// older nodes, this won't be found at all, and will rely on the
   412  	// existing written chain actions. Additionally, if this channel hasn't
   413  	// logged any actions in the log, then this field won't be present.
   414  	commitSet, err := c.log.FetchConfirmedCommitSet(tx)
   415  	if err != nil && err != errNoCommitSet && err != errScopeBucketNoExist {
   416  		return nil, err
   417  	}
   418  
   419  	return &chanArbStartState{
   420  		currentState: state,
   421  		commitSet:    commitSet,
   422  	}, nil
   423  }
   424  
   425  // Start starts all the goroutines that the ChannelArbitrator needs to operate.
   426  // If takes a start state, which will be looked up on disk if it is not
   427  // provided.
   428  func (c *ChannelArbitrator) Start(state *chanArbStartState) error {
   429  	if !atomic.CompareAndSwapInt32(&c.started, 0, 1) {
   430  		return nil
   431  	}
   432  	c.startTimestamp = c.cfg.Clock.Now()
   433  
   434  	// If the state passed in is nil, we look it up now.
   435  	if state == nil {
   436  		var err error
   437  		state, err = c.getStartState(nil)
   438  		if err != nil {
   439  			return err
   440  		}
   441  	}
   442  
   443  	log.Debugf("Starting ChannelArbitrator(%v), htlc_set=%v, state=%v",
   444  		c.cfg.ChanPoint, newLogClosure(func() string {
   445  			return spew.Sdump(c.activeHTLCs)
   446  		}), state.currentState,
   447  	)
   448  
   449  	// Set our state from our starting state.
   450  	c.state = state.currentState
   451  
   452  	_, bestHeight, err := c.cfg.ChainIO.GetBestBlock()
   453  	if err != nil {
   454  		return err
   455  	}
   456  
   457  	// If the channel has been marked pending close in the database, and we
   458  	// haven't transitioned the state machine to StateContractClosed (or a
   459  	// succeeding state), then a state transition most likely failed. We'll
   460  	// try to recover from this by manually advancing the state by setting
   461  	// the corresponding close trigger.
   462  	trigger := chainTrigger
   463  	triggerHeight := uint32(bestHeight)
   464  	if c.cfg.IsPendingClose {
   465  		switch c.state {
   466  		case StateDefault:
   467  			fallthrough
   468  		case StateBroadcastCommit:
   469  			fallthrough
   470  		case StateCommitmentBroadcasted:
   471  			switch c.cfg.CloseType {
   472  
   473  			case channeldb.CooperativeClose:
   474  				trigger = coopCloseTrigger
   475  
   476  			case channeldb.BreachClose:
   477  				trigger = breachCloseTrigger
   478  
   479  			case channeldb.LocalForceClose:
   480  				trigger = localCloseTrigger
   481  
   482  			case channeldb.RemoteForceClose:
   483  				trigger = remoteCloseTrigger
   484  			}
   485  
   486  			log.Warnf("ChannelArbitrator(%v): detected stalled "+
   487  				"state=%v for closed channel",
   488  				c.cfg.ChanPoint, c.state)
   489  		}
   490  
   491  		triggerHeight = c.cfg.ClosingHeight
   492  	}
   493  
   494  	log.Infof("ChannelArbitrator(%v): starting state=%v, trigger=%v, "+
   495  		"triggerHeight=%v", c.cfg.ChanPoint, c.state, trigger,
   496  		triggerHeight)
   497  
   498  	// We'll now attempt to advance our state forward based on the current
   499  	// on-chain state, and our set of active contracts.
   500  	startingState := c.state
   501  	nextState, _, err := c.advanceState(
   502  		triggerHeight, trigger, state.commitSet,
   503  	)
   504  	if err != nil {
   505  		switch err {
   506  
   507  		// If we detect that we tried to fetch resolutions, but failed,
   508  		// this channel was marked closed in the database before
   509  		// resolutions successfully written. In this case there is not
   510  		// much we can do, so we don't return the error.
   511  		case errScopeBucketNoExist:
   512  			fallthrough
   513  		case errNoResolutions:
   514  			log.Warnf("ChannelArbitrator(%v): detected closed"+
   515  				"channel with no contract resolutions written.",
   516  				c.cfg.ChanPoint)
   517  
   518  		default:
   519  			return err
   520  		}
   521  	}
   522  
   523  	// If we start and ended at the awaiting full resolution state, then
   524  	// we'll relaunch our set of unresolved contracts.
   525  	if startingState == StateWaitingFullResolution &&
   526  		nextState == StateWaitingFullResolution {
   527  
   528  		// In order to relaunch the resolvers, we'll need to fetch the
   529  		// set of HTLCs that were present in the commitment transaction
   530  		// at the time it was confirmed. commitSet.ConfCommitKey can't
   531  		// be nil at this point since we're in
   532  		// StateWaitingFullResolution. We can only be in
   533  		// StateWaitingFullResolution after we've transitioned from
   534  		// StateContractClosed which can only be triggered by the local
   535  		// or remote close trigger. This trigger is only fired when we
   536  		// receive a chain event from the chain watcher than the
   537  		// commitment has been confirmed on chain, and before we
   538  		// advance our state step, we call InsertConfirmedCommitSet.
   539  		err := c.relaunchResolvers(state.commitSet, triggerHeight)
   540  		if err != nil {
   541  			return err
   542  		}
   543  	}
   544  
   545  	c.wg.Add(1)
   546  	go c.channelAttendant(bestHeight)
   547  	return nil
   548  }
   549  
   550  // relauchResolvers relaunches the set of resolvers for unresolved contracts in
   551  // order to provide them with information that's not immediately available upon
   552  // starting the ChannelArbitrator. This information should ideally be stored in
   553  // the database, so this only serves as a intermediate work-around to prevent a
   554  // migration.
   555  func (c *ChannelArbitrator) relaunchResolvers(commitSet *CommitSet,
   556  	heightHint uint32) error {
   557  
   558  	// We'll now query our log to see if there are any active unresolved
   559  	// contracts. If this is the case, then we'll relaunch all contract
   560  	// resolvers.
   561  	unresolvedContracts, err := c.log.FetchUnresolvedContracts()
   562  	if err != nil {
   563  		return err
   564  	}
   565  
   566  	// Retrieve the commitment tx hash from the log.
   567  	contractResolutions, err := c.log.FetchContractResolutions()
   568  	if err != nil {
   569  		log.Errorf("unable to fetch contract resolutions: %v",
   570  			err)
   571  		return err
   572  	}
   573  	commitHash := contractResolutions.CommitHash
   574  
   575  	// In prior versions of lnd, the information needed to supplement the
   576  	// resolvers (in most cases, the full amount of the HTLC) was found in
   577  	// the chain action map, which is now deprecated.  As a result, if the
   578  	// commitSet is nil (an older node with unresolved HTLCs at time of
   579  	// upgrade), then we'll use the chain action information in place. The
   580  	// chain actions may exclude some information, but we cannot recover it
   581  	// for these older nodes at the moment.
   582  	var confirmedHTLCs []channeldb.HTLC
   583  	if commitSet != nil {
   584  		confirmedHTLCs = commitSet.HtlcSets[*commitSet.ConfCommitKey]
   585  	} else {
   586  		chainActions, err := c.log.FetchChainActions()
   587  		if err != nil {
   588  			log.Errorf("unable to fetch chain actions: %v", err)
   589  			return err
   590  		}
   591  		for _, htlcs := range chainActions {
   592  			confirmedHTLCs = append(confirmedHTLCs, htlcs...)
   593  		}
   594  	}
   595  
   596  	// Reconstruct the htlc outpoints and data from the chain action log.
   597  	// The purpose of the constructed htlc map is to supplement to
   598  	// resolvers restored from database with extra data. Ideally this data
   599  	// is stored as part of the resolver in the log. This is a workaround
   600  	// to prevent a db migration. We use all available htlc sets here in
   601  	// order to ensure we have complete coverage.
   602  	htlcMap := make(map[wire.OutPoint]*channeldb.HTLC)
   603  	for _, htlc := range confirmedHTLCs {
   604  		htlc := htlc
   605  		outpoint := wire.OutPoint{
   606  			Hash:  commitHash,
   607  			Index: uint32(htlc.OutputIndex),
   608  		}
   609  		htlcMap[outpoint] = &htlc
   610  	}
   611  
   612  	// We'll also fetch the historical state of this channel, as it should
   613  	// have been marked as closed by now, and supplement it to each resolver
   614  	// such that we can properly resolve our pending contracts.
   615  	var chanState *channeldb.OpenChannel
   616  	chanState, err = c.cfg.FetchHistoricalChannel()
   617  	switch {
   618  	// If we don't find this channel, then it may be the case that it
   619  	// was closed before we started to retain the final state
   620  	// information for open channels.
   621  	case err == channeldb.ErrNoHistoricalBucket:
   622  		fallthrough
   623  	case err == channeldb.ErrChannelNotFound:
   624  		log.Warnf("ChannelArbitrator(%v): unable to fetch historical "+
   625  			"state", c.cfg.ChanPoint)
   626  
   627  	case err != nil:
   628  		return err
   629  	}
   630  
   631  	log.Infof("ChannelArbitrator(%v): relaunching %v contract "+
   632  		"resolvers", c.cfg.ChanPoint, len(unresolvedContracts))
   633  
   634  	for _, resolver := range unresolvedContracts {
   635  		if chanState != nil {
   636  			resolver.SupplementState(chanState)
   637  		}
   638  
   639  		htlcResolver, ok := resolver.(htlcContractResolver)
   640  		if !ok {
   641  			continue
   642  		}
   643  
   644  		htlcPoint := htlcResolver.HtlcPoint()
   645  		htlc, ok := htlcMap[htlcPoint]
   646  		if !ok {
   647  			return fmt.Errorf(
   648  				"htlc resolver %T unavailable", resolver,
   649  			)
   650  		}
   651  
   652  		htlcResolver.Supplement(*htlc)
   653  	}
   654  
   655  	// The anchor resolver is stateless and can always be re-instantiated.
   656  	if contractResolutions.AnchorResolution != nil {
   657  		anchorResolver := newAnchorResolver(
   658  			contractResolutions.AnchorResolution.AnchorSignDescriptor,
   659  			contractResolutions.AnchorResolution.CommitAnchor,
   660  			heightHint, c.cfg.ChanPoint,
   661  			ResolverConfig{
   662  				ChannelArbitratorConfig: c.cfg,
   663  			},
   664  		)
   665  		unresolvedContracts = append(unresolvedContracts, anchorResolver)
   666  	}
   667  
   668  	c.launchResolvers(unresolvedContracts)
   669  
   670  	return nil
   671  }
   672  
   673  // Report returns htlc reports for the active resolvers.
   674  func (c *ChannelArbitrator) Report() []*ContractReport {
   675  	c.activeResolversLock.RLock()
   676  	defer c.activeResolversLock.RUnlock()
   677  
   678  	var reports []*ContractReport
   679  	for _, resolver := range c.activeResolvers {
   680  		r, ok := resolver.(reportingContractResolver)
   681  		if !ok {
   682  			continue
   683  		}
   684  
   685  		report := r.report()
   686  		if report == nil {
   687  			continue
   688  		}
   689  
   690  		reports = append(reports, report)
   691  	}
   692  
   693  	return reports
   694  }
   695  
   696  // Stop signals the ChannelArbitrator for a graceful shutdown.
   697  func (c *ChannelArbitrator) Stop() error {
   698  	if !atomic.CompareAndSwapInt32(&c.stopped, 0, 1) {
   699  		return nil
   700  	}
   701  
   702  	log.Debugf("Stopping ChannelArbitrator(%v)", c.cfg.ChanPoint)
   703  
   704  	if c.cfg.ChainEvents.Cancel != nil {
   705  		go c.cfg.ChainEvents.Cancel()
   706  	}
   707  
   708  	c.activeResolversLock.RLock()
   709  	for _, activeResolver := range c.activeResolvers {
   710  		activeResolver.Stop()
   711  	}
   712  	c.activeResolversLock.RUnlock()
   713  
   714  	close(c.quit)
   715  	c.wg.Wait()
   716  
   717  	return nil
   718  }
   719  
   720  // transitionTrigger is an enum that denotes exactly *why* a state transition
   721  // was initiated. This is useful as depending on the initial trigger, we may
   722  // skip certain states as those actions are expected to have already taken
   723  // place as a result of the external trigger.
   724  type transitionTrigger uint8
   725  
   726  const (
   727  	// chainTrigger is a transition trigger that has been attempted due to
   728  	// changing on-chain conditions such as a block which times out HTLC's
   729  	// being attached.
   730  	chainTrigger transitionTrigger = iota
   731  
   732  	// userTrigger is a transition trigger driven by user action. Examples
   733  	// of such a trigger include a user requesting a force closure of the
   734  	// channel.
   735  	userTrigger
   736  
   737  	// remoteCloseTrigger is a transition trigger driven by the remote
   738  	// peer's commitment being confirmed.
   739  	remoteCloseTrigger
   740  
   741  	// localCloseTrigger is a transition trigger driven by our commitment
   742  	// being confirmed.
   743  	localCloseTrigger
   744  
   745  	// coopCloseTrigger is a transition trigger driven by a cooperative
   746  	// close transaction being confirmed.
   747  	coopCloseTrigger
   748  
   749  	// breachCloseTrigger is a transition trigger driven by a remote breach
   750  	// being confirmed. In this case the channel arbitrator will wait for
   751  	// the breacharbiter to finish and then clean up gracefully.
   752  	breachCloseTrigger
   753  )
   754  
   755  // String returns a human readable string describing the passed
   756  // transitionTrigger.
   757  func (t transitionTrigger) String() string {
   758  	switch t {
   759  	case chainTrigger:
   760  		return "chainTrigger"
   761  
   762  	case remoteCloseTrigger:
   763  		return "remoteCloseTrigger"
   764  
   765  	case userTrigger:
   766  		return "userTrigger"
   767  
   768  	case localCloseTrigger:
   769  		return "localCloseTrigger"
   770  
   771  	case coopCloseTrigger:
   772  		return "coopCloseTrigger"
   773  
   774  	case breachCloseTrigger:
   775  		return "breachCloseTrigger"
   776  
   777  	default:
   778  		return "unknown trigger"
   779  	}
   780  }
   781  
   782  // stateStep is a help method that examines our internal state, and attempts
   783  // the appropriate state transition if necessary. The next state we transition
   784  // to is returned, Additionally, if the next transition results in a commitment
   785  // broadcast, the commitment transaction itself is returned.
   786  func (c *ChannelArbitrator) stateStep(
   787  	triggerHeight uint32, trigger transitionTrigger,
   788  	confCommitSet *CommitSet) (ArbitratorState, *wire.MsgTx, error) {
   789  
   790  	var (
   791  		nextState ArbitratorState
   792  		closeTx   *wire.MsgTx
   793  	)
   794  	switch c.state {
   795  
   796  	// If we're in the default state, then we'll check our set of actions
   797  	// to see if while we were down, conditions have changed.
   798  	case StateDefault:
   799  		log.Debugf("ChannelArbitrator(%v): new block (height=%v) "+
   800  			"examining active HTLC's", c.cfg.ChanPoint,
   801  			triggerHeight)
   802  
   803  		// As a new block has been connected to the end of the main
   804  		// chain, we'll check to see if we need to make any on-chain
   805  		// claims on behalf of the channel contract that we're
   806  		// arbitrating for. If a commitment has confirmed, then we'll
   807  		// use the set snapshot from the chain, otherwise we'll use our
   808  		// current set.
   809  		var htlcs map[HtlcSetKey]htlcSet
   810  		if confCommitSet != nil {
   811  			htlcs = confCommitSet.toActiveHTLCSets()
   812  		} else {
   813  			htlcs = c.activeHTLCs
   814  		}
   815  		chainActions, err := c.checkLocalChainActions(
   816  			triggerHeight, trigger, htlcs, false,
   817  		)
   818  		if err != nil {
   819  			return StateDefault, nil, err
   820  		}
   821  
   822  		// If there are no actions to be made, then we'll remain in the
   823  		// default state. If this isn't a self initiated event (we're
   824  		// checking due to a chain update), then we'll exit now.
   825  		if len(chainActions) == 0 && trigger == chainTrigger {
   826  			log.Debugf("ChannelArbitrator(%v): no actions for "+
   827  				"chain trigger, terminating", c.cfg.ChanPoint)
   828  
   829  			return StateDefault, closeTx, nil
   830  		}
   831  
   832  		// Otherwise, we'll log that we checked the HTLC actions as the
   833  		// commitment transaction has already been broadcast.
   834  		log.Tracef("ChannelArbitrator(%v): logging chain_actions=%v",
   835  			c.cfg.ChanPoint,
   836  			newLogClosure(func() string {
   837  				return spew.Sdump(chainActions)
   838  			}))
   839  
   840  		// Depending on the type of trigger, we'll either "tunnel"
   841  		// through to a farther state, or just proceed linearly to the
   842  		// next state.
   843  		switch trigger {
   844  
   845  		// If this is a chain trigger, then we'll go straight to the
   846  		// next state, as we still need to broadcast the commitment
   847  		// transaction.
   848  		case chainTrigger:
   849  			fallthrough
   850  		case userTrigger:
   851  			nextState = StateBroadcastCommit
   852  
   853  		// If the trigger is a cooperative close being confirmed, then
   854  		// we can go straight to StateFullyResolved, as there won't be
   855  		// any contracts to resolve.
   856  		case coopCloseTrigger:
   857  			nextState = StateFullyResolved
   858  
   859  		// Otherwise, if this state advance was triggered by a
   860  		// commitment being confirmed on chain, then we'll jump
   861  		// straight to the state where the contract has already been
   862  		// closed, and we will inspect the set of unresolved contracts.
   863  		case localCloseTrigger:
   864  			log.Errorf("ChannelArbitrator(%v): unexpected local "+
   865  				"commitment confirmed while in StateDefault",
   866  				c.cfg.ChanPoint)
   867  			fallthrough
   868  		case remoteCloseTrigger:
   869  			nextState = StateContractClosed
   870  
   871  		case breachCloseTrigger:
   872  			nextContractState, err := c.checkLegacyBreach()
   873  			if nextContractState == StateError {
   874  				return nextContractState, nil, err
   875  			}
   876  
   877  			nextState = nextContractState
   878  		}
   879  
   880  	// If we're in this state, then we've decided to broadcast the
   881  	// commitment transaction. We enter this state either due to an outside
   882  	// sub-system, or because an on-chain action has been triggered.
   883  	case StateBroadcastCommit:
   884  		// Under normal operation, we can only enter
   885  		// StateBroadcastCommit via a user or chain trigger. On restart,
   886  		// this state may be reexecuted after closing the channel, but
   887  		// failing to commit to StateContractClosed or
   888  		// StateFullyResolved. In that case, one of the four close
   889  		// triggers will be presented, signifying that we should skip
   890  		// rebroadcasting, and go straight to resolving the on-chain
   891  		// contract or marking the channel resolved.
   892  		switch trigger {
   893  		case localCloseTrigger, remoteCloseTrigger:
   894  			log.Infof("ChannelArbitrator(%v): detected %s "+
   895  				"close after closing channel, fast-forwarding "+
   896  				"to %s to resolve contract",
   897  				c.cfg.ChanPoint, trigger, StateContractClosed)
   898  			return StateContractClosed, closeTx, nil
   899  
   900  		case breachCloseTrigger:
   901  			nextContractState, err := c.checkLegacyBreach()
   902  			if nextContractState == StateError {
   903  				log.Infof("ChannelArbitrator(%v): unable to "+
   904  					"advance breach close resolution: %v",
   905  					c.cfg.ChanPoint, nextContractState)
   906  				return StateError, closeTx, err
   907  			}
   908  
   909  			log.Infof("ChannelArbitrator(%v): detected %s close "+
   910  				"after closing channel, fast-forwarding to %s"+
   911  				" to resolve contract", c.cfg.ChanPoint,
   912  				trigger, nextContractState)
   913  
   914  			return nextContractState, closeTx, nil
   915  
   916  		case coopCloseTrigger:
   917  			log.Infof("ChannelArbitrator(%v): detected %s "+
   918  				"close after closing channel, fast-forwarding "+
   919  				"to %s to resolve contract",
   920  				c.cfg.ChanPoint, trigger, StateFullyResolved)
   921  			return StateFullyResolved, closeTx, nil
   922  		}
   923  
   924  		log.Infof("ChannelArbitrator(%v): force closing "+
   925  			"chan", c.cfg.ChanPoint)
   926  
   927  		// Now that we have all the actions decided for the set of
   928  		// HTLC's, we'll broadcast the commitment transaction, and
   929  		// signal the link to exit.
   930  
   931  		// We'll tell the switch that it should remove the link for
   932  		// this channel, in addition to fetching the force close
   933  		// summary needed to close this channel on chain.
   934  		closeSummary, err := c.cfg.Channel.ForceCloseChan()
   935  		if err != nil {
   936  			log.Errorf("ChannelArbitrator(%v): unable to "+
   937  				"force close: %v", c.cfg.ChanPoint, err)
   938  			return StateError, closeTx, err
   939  		}
   940  		closeTx = closeSummary.CloseTx
   941  
   942  		// Before publishing the transaction, we store it to the
   943  		// database, such that we can re-publish later in case it
   944  		// didn't propagate. We initiated the force close, so we
   945  		// mark broadcast with local initiator set to true.
   946  		err = c.cfg.MarkCommitmentBroadcasted(closeTx, true)
   947  		if err != nil {
   948  			log.Errorf("ChannelArbitrator(%v): unable to "+
   949  				"mark commitment broadcasted: %v",
   950  				c.cfg.ChanPoint, err)
   951  			return StateError, closeTx, err
   952  		}
   953  
   954  		// With the close transaction in hand, broadcast the
   955  		// transaction to the network, thereby entering the post
   956  		// channel resolution state.
   957  		log.Infof("Broadcasting force close transaction %v, "+
   958  			"ChannelPoint(%v): %v", closeTx.TxHash(),
   959  			c.cfg.ChanPoint,
   960  			newLogClosure(func() string {
   961  				return spew.Sdump(closeTx)
   962  			}))
   963  
   964  		// At this point, we'll now broadcast the commitment
   965  		// transaction itself.
   966  		label := labels.MakeLabel(
   967  			labels.LabelTypeChannelClose, &c.cfg.ShortChanID,
   968  		)
   969  
   970  		if err := c.cfg.PublishTx(closeTx, label); err != nil {
   971  			log.Errorf("ChannelArbitrator(%v): unable to broadcast "+
   972  				"close tx: %v", c.cfg.ChanPoint, err)
   973  			if err != lnwallet.ErrDoubleSpend {
   974  				return StateError, closeTx, err
   975  			}
   976  		}
   977  
   978  		// We go to the StateCommitmentBroadcasted state, where we'll
   979  		// be waiting for the commitment to be confirmed.
   980  		nextState = StateCommitmentBroadcasted
   981  
   982  	// In this state we have broadcasted our own commitment, and will need
   983  	// to wait for a commitment (not necessarily the one we broadcasted!)
   984  	// to be confirmed.
   985  	case StateCommitmentBroadcasted:
   986  		switch trigger {
   987  
   988  		// We are waiting for a commitment to be confirmed.
   989  		case chainTrigger, userTrigger:
   990  			// The commitment transaction has been broadcast, but it
   991  			// doesn't necessarily need to be the commitment
   992  			// transaction version that is going to be confirmed. To
   993  			// be sure that any of those versions can be anchored
   994  			// down, we now submit all anchor resolutions to the
   995  			// sweeper. The sweeper will keep trying to sweep all of
   996  			// them.
   997  			//
   998  			// Note that the sweeper is idempotent. If we ever
   999  			// happen to end up at this point in the code again, no
  1000  			// harm is done by re-offering the anchors to the
  1001  			// sweeper.
  1002  			anchors, err := c.cfg.Channel.NewAnchorResolutions()
  1003  			if err != nil {
  1004  				return StateError, closeTx, err
  1005  			}
  1006  
  1007  			err = c.sweepAnchors(anchors, triggerHeight)
  1008  			if err != nil {
  1009  				return StateError, closeTx, err
  1010  			}
  1011  
  1012  			nextState = StateCommitmentBroadcasted
  1013  
  1014  		// If this state advance was triggered by any of the
  1015  		// commitments being confirmed, then we'll jump to the state
  1016  		// where the contract has been closed.
  1017  		case localCloseTrigger, remoteCloseTrigger:
  1018  			nextState = StateContractClosed
  1019  
  1020  		// If a coop close was confirmed, jump straight to the fully
  1021  		// resolved state.
  1022  		case coopCloseTrigger:
  1023  			nextState = StateFullyResolved
  1024  
  1025  		case breachCloseTrigger:
  1026  			nextContractState, err := c.checkLegacyBreach()
  1027  			if nextContractState == StateError {
  1028  				return nextContractState, closeTx, err
  1029  			}
  1030  
  1031  			nextState = nextContractState
  1032  		}
  1033  
  1034  		log.Infof("ChannelArbitrator(%v): trigger %v moving from "+
  1035  			"state %v to %v", c.cfg.ChanPoint, trigger, c.state,
  1036  			nextState)
  1037  
  1038  	// If we're in this state, then the contract has been fully closed to
  1039  	// outside sub-systems, so we'll process the prior set of on-chain
  1040  	// contract actions and launch a set of resolvers.
  1041  	case StateContractClosed:
  1042  		// First, we'll fetch our chain actions, and both sets of
  1043  		// resolutions so we can process them.
  1044  		contractResolutions, err := c.log.FetchContractResolutions()
  1045  		if err != nil {
  1046  			log.Errorf("unable to fetch contract resolutions: %v",
  1047  				err)
  1048  			return StateError, closeTx, err
  1049  		}
  1050  
  1051  		// If the resolution is empty, and we have no HTLCs at all to
  1052  		// send to, then we're done here. We don't need to launch any
  1053  		// resolvers, and can go straight to our final state.
  1054  		if contractResolutions.IsEmpty() && confCommitSet.IsEmpty() {
  1055  			log.Infof("ChannelArbitrator(%v): contract "+
  1056  				"resolutions empty, marking channel as fully resolved!",
  1057  				c.cfg.ChanPoint)
  1058  			nextState = StateFullyResolved
  1059  			break
  1060  		}
  1061  
  1062  		// Now that we know we'll need to act, we'll process the htlc
  1063  		// actions, then create the structures we need to resolve all
  1064  		// outstanding contracts.
  1065  		htlcResolvers, pktsToSend, err := c.prepContractResolutions(
  1066  			contractResolutions, triggerHeight, trigger,
  1067  			confCommitSet,
  1068  		)
  1069  		if err != nil {
  1070  			log.Errorf("ChannelArbitrator(%v): unable to "+
  1071  				"resolve contracts: %v", c.cfg.ChanPoint, err)
  1072  			return StateError, closeTx, err
  1073  		}
  1074  
  1075  		log.Debugf("ChannelArbitrator(%v): sending resolution message=%v",
  1076  			c.cfg.ChanPoint,
  1077  			newLogClosure(func() string {
  1078  				return spew.Sdump(pktsToSend)
  1079  			}))
  1080  
  1081  		// With the commitment broadcast, we'll then send over all
  1082  		// messages we can send immediately.
  1083  		if len(pktsToSend) != 0 {
  1084  			err := c.cfg.DeliverResolutionMsg(pktsToSend...)
  1085  			if err != nil {
  1086  				// TODO(roasbeef): make sure packet sends are
  1087  				// idempotent
  1088  				log.Errorf("unable to send pkts: %v", err)
  1089  				return StateError, closeTx, err
  1090  			}
  1091  		}
  1092  
  1093  		log.Debugf("ChannelArbitrator(%v): inserting %v contract "+
  1094  			"resolvers", c.cfg.ChanPoint, len(htlcResolvers))
  1095  
  1096  		err = c.log.InsertUnresolvedContracts(nil, htlcResolvers...)
  1097  		if err != nil {
  1098  			return StateError, closeTx, err
  1099  		}
  1100  
  1101  		// Finally, we'll launch all the required contract resolvers.
  1102  		// Once they're all resolved, we're no longer needed.
  1103  		c.launchResolvers(htlcResolvers)
  1104  
  1105  		nextState = StateWaitingFullResolution
  1106  
  1107  	// This is our terminal state. We'll keep returning this state until
  1108  	// all contracts are fully resolved.
  1109  	case StateWaitingFullResolution:
  1110  		log.Infof("ChannelArbitrator(%v): still awaiting contract "+
  1111  			"resolution", c.cfg.ChanPoint)
  1112  
  1113  		numUnresolved, err := c.log.FetchUnresolvedContracts()
  1114  		if err != nil {
  1115  			return StateError, closeTx, err
  1116  		}
  1117  
  1118  		// If we still have unresolved contracts, then we'll stay alive
  1119  		// to oversee their resolution.
  1120  		if len(numUnresolved) != 0 {
  1121  			nextState = StateWaitingFullResolution
  1122  			break
  1123  		}
  1124  
  1125  		nextState = StateFullyResolved
  1126  
  1127  	// If we start as fully resolved, then we'll end as fully resolved.
  1128  	case StateFullyResolved:
  1129  		// To ensure that the state of the contract in persistent
  1130  		// storage is properly reflected, we'll mark the contract as
  1131  		// fully resolved now.
  1132  		nextState = StateFullyResolved
  1133  
  1134  		log.Infof("ChannelPoint(%v) has been fully resolved "+
  1135  			"on-chain at height=%v", c.cfg.ChanPoint, triggerHeight)
  1136  
  1137  		if err := c.cfg.MarkChannelResolved(); err != nil {
  1138  			log.Errorf("unable to mark channel resolved: %v", err)
  1139  			return StateError, closeTx, err
  1140  		}
  1141  	}
  1142  
  1143  	log.Tracef("ChannelArbitrator(%v): next_state=%v", c.cfg.ChanPoint,
  1144  		nextState)
  1145  
  1146  	return nextState, closeTx, nil
  1147  }
  1148  
  1149  // sweepAnchors offers all given anchor resolutions to the sweeper. It requests
  1150  // sweeping at the minimum fee rate. This fee rate can be upped manually by the
  1151  // user via the BumpFee rpc.
  1152  func (c *ChannelArbitrator) sweepAnchors(anchors *lnwallet.AnchorResolutions,
  1153  	heightHint uint32) error {
  1154  
  1155  	// Use the chan id as the exclusive group. This prevents any of the
  1156  	// anchors from being batched together.
  1157  	exclusiveGroup := c.cfg.ShortChanID.ToUint64()
  1158  
  1159  	// sweepWithDeadline is a helper closure that takes an anchor
  1160  	// resolution and sweeps it with its corresponding deadline.
  1161  	sweepWithDeadline := func(anchor *lnwallet.AnchorResolution,
  1162  		htlcs htlcSet, anchorPath string) error {
  1163  
  1164  		// Find the deadline for this specific anchor.
  1165  		deadline, err := c.findCommitmentDeadline(heightHint, htlcs)
  1166  		if err != nil {
  1167  			return err
  1168  		}
  1169  
  1170  		log.Debugf("ChannelArbitrator(%v): pre-confirmation sweep of "+
  1171  			"anchor of %s commit tx %v", c.cfg.ChanPoint,
  1172  			anchorPath, anchor.CommitAnchor)
  1173  
  1174  		// Prepare anchor output for sweeping.
  1175  		anchorInput := input.MakeBaseInput(
  1176  			&anchor.CommitAnchor,
  1177  			input.CommitmentAnchor,
  1178  			&anchor.AnchorSignDescriptor,
  1179  			heightHint,
  1180  			&input.TxInfo{
  1181  				Fee:  anchor.CommitFee,
  1182  				Size: anchor.CommitSize,
  1183  			},
  1184  		)
  1185  
  1186  		// Sweep anchor output with a confirmation target fee
  1187  		// preference. Because this is a cpfp-operation, the anchor will
  1188  		// only be attempted to sweep when the current fee estimate for
  1189  		// the confirmation target exceeds the commit fee rate.
  1190  		//
  1191  		// Also signal that this is a force sweep, so that the anchor
  1192  		// will be swept even if it isn't economical purely based on the
  1193  		// anchor value.
  1194  		_, err = c.cfg.Sweeper.SweepInput(
  1195  			&anchorInput,
  1196  			sweep.Params{
  1197  				Fee: sweep.FeePreference{
  1198  					ConfTarget: deadline,
  1199  				},
  1200  				Force:          true,
  1201  				ExclusiveGroup: &exclusiveGroup,
  1202  			},
  1203  		)
  1204  		if err != nil {
  1205  			return err
  1206  		}
  1207  
  1208  		return nil
  1209  	}
  1210  
  1211  	// Sweep anchors based on different HTLC sets. Notice the HTLC sets may
  1212  	// differ across commitments, thus their deadline values could vary.
  1213  	for htlcSet, htlcs := range c.activeHTLCs {
  1214  		switch {
  1215  		case htlcSet == LocalHtlcSet && anchors.Local != nil:
  1216  			err := sweepWithDeadline(anchors.Local, htlcs, "local")
  1217  			if err != nil {
  1218  				return err
  1219  			}
  1220  
  1221  		case htlcSet == RemoteHtlcSet && anchors.Remote != nil:
  1222  			err := sweepWithDeadline(
  1223  				anchors.Remote, htlcs, "remote",
  1224  			)
  1225  			if err != nil {
  1226  				return err
  1227  			}
  1228  
  1229  		case htlcSet == RemotePendingHtlcSet &&
  1230  			anchors.RemotePending != nil:
  1231  
  1232  			err := sweepWithDeadline(
  1233  				anchors.RemotePending, htlcs, "remote pending",
  1234  			)
  1235  			if err != nil {
  1236  				return err
  1237  			}
  1238  		}
  1239  	}
  1240  
  1241  	return nil
  1242  }
  1243  
  1244  // findCommitmentDeadline finds the deadline (relative block height) for a
  1245  // commitment transaction by extracting the minimum CLTV from its HTLCs. From
  1246  // our PoV, the deadline is defined to be the smaller of,
  1247  //   - the least CLTV from outgoing HTLCs,  or,
  1248  //   - the least CLTV from incoming HTLCs if the preimage is available.
  1249  //
  1250  // Note: when the deadline turns out to be 0 blocks, we will replace it with 1
  1251  // block because our fee estimator doesn't allow a 0 conf target. This also
  1252  // means we've left behind and should increase our fee to make the transaction
  1253  // confirmed asap.
  1254  func (c *ChannelArbitrator) findCommitmentDeadline(heightHint uint32,
  1255  	htlcs htlcSet) (uint32, error) {
  1256  
  1257  	deadlineMinHeight := uint32(math.MaxUint32)
  1258  
  1259  	// First, iterate through the outgoingHTLCs to find the lowest CLTV
  1260  	// value.
  1261  	for _, htlc := range htlcs.outgoingHTLCs {
  1262  		// Skip if the HTLC is dust.
  1263  		if htlc.OutputIndex < 0 {
  1264  			log.Debugf("ChannelArbitrator(%v): skipped deadline "+
  1265  				"for dust htlc=%x",
  1266  				c.cfg.ChanPoint, htlc.RHash[:])
  1267  
  1268  			continue
  1269  		}
  1270  
  1271  		if htlc.RefundTimeout < deadlineMinHeight {
  1272  			deadlineMinHeight = htlc.RefundTimeout
  1273  		}
  1274  	}
  1275  
  1276  	// Then going through the incomingHTLCs, and update the minHeight when
  1277  	// conditions met.
  1278  	for _, htlc := range htlcs.incomingHTLCs {
  1279  		// Skip if the HTLC is dust.
  1280  		if htlc.OutputIndex < 0 {
  1281  			log.Debugf("ChannelArbitrator(%v): skipped deadline "+
  1282  				"for dust htlc=%x",
  1283  				c.cfg.ChanPoint, htlc.RHash[:])
  1284  
  1285  			continue
  1286  		}
  1287  
  1288  		// Since it's an HTLC sent to us, check if we have preimage for
  1289  		// this HTLC.
  1290  		preimageAvailable, err := c.isPreimageAvailable(htlc.RHash)
  1291  		if err != nil {
  1292  			return 0, err
  1293  		}
  1294  
  1295  		if !preimageAvailable {
  1296  			continue
  1297  		}
  1298  
  1299  		if htlc.RefundTimeout < deadlineMinHeight {
  1300  			deadlineMinHeight = htlc.RefundTimeout
  1301  		}
  1302  	}
  1303  
  1304  	// Calculate the deadline. There are two cases to be handled here,
  1305  	//   - when the deadlineMinHeight never gets updated, which could
  1306  	//     happen when we have no outgoing HTLCs, and, for incoming HTLCs,
  1307  	//       * either we have none, or,
  1308  	//       * none of the HTLCs are preimageAvailable.
  1309  	//   - when our deadlineMinHeight is no greater than the heightHint,
  1310  	//     which means we are behind our schedule.
  1311  	deadline := deadlineMinHeight - heightHint
  1312  	switch {
  1313  	// When we couldn't find a deadline height from our HTLCs, we will fall
  1314  	// back to the default value.
  1315  	case deadlineMinHeight == math.MaxUint32:
  1316  		deadline = anchorSweepConfTarget
  1317  
  1318  	// When the deadline is passed, we will fall back to the smallest conf
  1319  	// target (1 block).
  1320  	case deadlineMinHeight <= heightHint:
  1321  		log.Warnf("ChannelArbitrator(%v): deadline is passed with "+
  1322  			"deadlineMinHeight=%d, heightHint=%d",
  1323  			c.cfg.ChanPoint, deadlineMinHeight, heightHint)
  1324  		deadline = 1
  1325  	}
  1326  
  1327  	log.Debugf("ChannelArbitrator(%v): calculated deadline: %d, "+
  1328  		"using deadlineMinHeight=%d, heightHint=%d",
  1329  		c.cfg.ChanPoint, deadline, deadlineMinHeight, heightHint)
  1330  
  1331  	return deadline, nil
  1332  }
  1333  
  1334  // launchResolvers updates the activeResolvers list and starts the resolvers.
  1335  func (c *ChannelArbitrator) launchResolvers(resolvers []ContractResolver) {
  1336  	c.activeResolversLock.Lock()
  1337  	defer c.activeResolversLock.Unlock()
  1338  
  1339  	c.activeResolvers = resolvers
  1340  	for _, contract := range resolvers {
  1341  		c.wg.Add(1)
  1342  		go c.resolveContract(contract)
  1343  	}
  1344  }
  1345  
  1346  // advanceState is the main driver of our state machine. This method is an
  1347  // iterative function which repeatedly attempts to advance the internal state
  1348  // of the channel arbitrator. The state will be advanced until we reach a
  1349  // redundant transition, meaning that the state transition is a noop. The final
  1350  // param is a callback that allows the caller to execute an arbitrary action
  1351  // after each state transition.
  1352  func (c *ChannelArbitrator) advanceState(
  1353  	triggerHeight uint32, trigger transitionTrigger,
  1354  	confCommitSet *CommitSet) (ArbitratorState, *wire.MsgTx, error) {
  1355  
  1356  	var (
  1357  		priorState   ArbitratorState
  1358  		forceCloseTx *wire.MsgTx
  1359  	)
  1360  
  1361  	// We'll continue to advance our state forward until the state we
  1362  	// transition to is that same state that we started at.
  1363  	for {
  1364  		priorState = c.state
  1365  		log.Debugf("ChannelArbitrator(%v): attempting state step with "+
  1366  			"trigger=%v from state=%v", c.cfg.ChanPoint, trigger,
  1367  			priorState)
  1368  
  1369  		nextState, closeTx, err := c.stateStep(
  1370  			triggerHeight, trigger, confCommitSet,
  1371  		)
  1372  		if err != nil {
  1373  			log.Errorf("ChannelArbitrator(%v): unable to advance "+
  1374  				"state: %v", c.cfg.ChanPoint, err)
  1375  			return priorState, nil, err
  1376  		}
  1377  
  1378  		if forceCloseTx == nil && closeTx != nil {
  1379  			forceCloseTx = closeTx
  1380  		}
  1381  
  1382  		// Our termination transition is a noop transition. If we get
  1383  		// our prior state back as the next state, then we'll
  1384  		// terminate.
  1385  		if nextState == priorState {
  1386  			log.Debugf("ChannelArbitrator(%v): terminating at "+
  1387  				"state=%v", c.cfg.ChanPoint, nextState)
  1388  			return nextState, forceCloseTx, nil
  1389  		}
  1390  
  1391  		// As the prior state was successfully executed, we can now
  1392  		// commit the next state. This ensures that we will re-execute
  1393  		// the prior state if anything fails.
  1394  		if err := c.log.CommitState(nextState); err != nil {
  1395  			log.Errorf("ChannelArbitrator(%v): unable to commit "+
  1396  				"next state(%v): %v", c.cfg.ChanPoint,
  1397  				nextState, err)
  1398  			return priorState, nil, err
  1399  		}
  1400  		c.state = nextState
  1401  	}
  1402  }
  1403  
  1404  // ChainAction is an enum that encompasses all possible on-chain actions
  1405  // we'll take for a set of HTLC's.
  1406  type ChainAction uint8
  1407  
  1408  const (
  1409  	// NoAction is the min chainAction type, indicating that no action
  1410  	// needs to be taken for a given HTLC.
  1411  	NoAction ChainAction = 0
  1412  
  1413  	// HtlcTimeoutAction indicates that the HTLC will timeout soon. As a
  1414  	// result, we should get ready to sweep it on chain after the timeout.
  1415  	HtlcTimeoutAction = 1
  1416  
  1417  	// HtlcClaimAction indicates that we should claim the HTLC on chain
  1418  	// before its timeout period.
  1419  	HtlcClaimAction = 2
  1420  
  1421  	// HtlcFailNowAction indicates that we should fail an outgoing HTLC
  1422  	// immediately by cancelling it backwards as it has no corresponding
  1423  	// output in our commitment transaction.
  1424  	HtlcFailNowAction = 3
  1425  
  1426  	// HtlcOutgoingWatchAction indicates that we can't yet timeout this
  1427  	// HTLC, but we had to go to chain on order to resolve an existing
  1428  	// HTLC.  In this case, we'll either: time it out once it expires, or
  1429  	// will learn the pre-image if the remote party claims the output. In
  1430  	// this case, well add the pre-image to our global store.
  1431  	HtlcOutgoingWatchAction = 4
  1432  
  1433  	// HtlcIncomingWatchAction indicates that we don't yet have the
  1434  	// pre-image to claim incoming HTLC, but we had to go to chain in order
  1435  	// to resolve and existing HTLC. In this case, we'll either: let the
  1436  	// other party time it out, or eventually learn of the pre-image, in
  1437  	// which case we'll claim on chain.
  1438  	HtlcIncomingWatchAction = 5
  1439  )
  1440  
  1441  // String returns a human readable string describing a chain action.
  1442  func (c ChainAction) String() string {
  1443  	switch c {
  1444  	case NoAction:
  1445  		return "NoAction"
  1446  
  1447  	case HtlcTimeoutAction:
  1448  		return "HtlcTimeoutAction"
  1449  
  1450  	case HtlcClaimAction:
  1451  		return "HtlcClaimAction"
  1452  
  1453  	case HtlcFailNowAction:
  1454  		return "HtlcFailNowAction"
  1455  
  1456  	case HtlcOutgoingWatchAction:
  1457  		return "HtlcOutgoingWatchAction"
  1458  
  1459  	case HtlcIncomingWatchAction:
  1460  		return "HtlcIncomingWatchAction"
  1461  
  1462  	default:
  1463  		return "<unknown action>"
  1464  	}
  1465  }
  1466  
  1467  // ChainActionMap is a map of a chain action, to the set of HTLC's that need to
  1468  // be acted upon for a given action type. The channel
  1469  type ChainActionMap map[ChainAction][]channeldb.HTLC
  1470  
  1471  // Merge merges the passed chain actions with the target chain action map.
  1472  func (c ChainActionMap) Merge(actions ChainActionMap) {
  1473  	for chainAction, htlcs := range actions {
  1474  		c[chainAction] = append(c[chainAction], htlcs...)
  1475  	}
  1476  }
  1477  
  1478  // shouldGoOnChain takes into account the absolute timeout of the HTLC, if the
  1479  // confirmation delta that we need is close, and returns a bool indicating if
  1480  // we should go on chain to claim.  We do this rather than waiting up until the
  1481  // last minute as we want to ensure that when we *need* (HTLC is timed out) to
  1482  // sweep, the commitment is already confirmed.
  1483  func (c *ChannelArbitrator) shouldGoOnChain(htlc channeldb.HTLC,
  1484  	broadcastDelta, currentHeight uint32) bool {
  1485  
  1486  	// We'll calculate the broadcast cut off for this HTLC. This is the
  1487  	// height that (based on our current fee estimation) we should
  1488  	// broadcast in order to ensure the commitment transaction is confirmed
  1489  	// before the HTLC fully expires.
  1490  	broadcastCutOff := htlc.RefundTimeout - broadcastDelta
  1491  
  1492  	log.Tracef("ChannelArbitrator(%v): examining outgoing contract: "+
  1493  		"expiry=%v, cutoff=%v, height=%v", c.cfg.ChanPoint, htlc.RefundTimeout,
  1494  		broadcastCutOff, currentHeight)
  1495  
  1496  	// TODO(roasbeef): take into account default HTLC delta, don't need to
  1497  	// broadcast immediately
  1498  	//  * can then batch with SINGLE | ANYONECANPAY
  1499  
  1500  	// We should on-chain for this HTLC, iff we're within out broadcast
  1501  	// cutoff window.
  1502  	if currentHeight < broadcastCutOff {
  1503  		return false
  1504  	}
  1505  
  1506  	// In case of incoming htlc we should go to chain.
  1507  	if htlc.Incoming {
  1508  		return true
  1509  	}
  1510  
  1511  	// For htlcs that are result of our initiated payments we give some grace
  1512  	// period before force closing the channel. During this time we expect
  1513  	// both nodes to connect and give a chance to the other node to send its
  1514  	// updates and cancel the htlc.
  1515  	// This shouldn't add any security risk as there is no incoming htlc to
  1516  	// fulfill at this case and the expectation is that when the channel is
  1517  	// active the other node will send update_fail_htlc to remove the htlc
  1518  	// without closing the channel. It is up to the user to force close the
  1519  	// channel if the peer misbehaves and doesn't send the update_fail_htlc.
  1520  	// It is useful when this node is most of the time not online and is
  1521  	// likely to miss the time slot where the htlc may be cancelled.
  1522  	isForwarded := c.cfg.IsForwardedHTLC(c.cfg.ShortChanID, htlc.HtlcIndex)
  1523  	upTime := c.cfg.Clock.Now().Sub(c.startTimestamp)
  1524  	return isForwarded || upTime > c.cfg.PaymentsExpirationGracePeriod
  1525  }
  1526  
  1527  // checkCommitChainActions is called for each new block connected to the end of
  1528  // the main chain. Given the new block height, this new method will examine all
  1529  // active HTLC's, and determine if we need to go on-chain to claim any of them.
  1530  // A map of action -> []htlc is returned, detailing what action (if any) should
  1531  // be performed for each HTLC. For timed out HTLC's, once the commitment has
  1532  // been sufficiently confirmed, the HTLC's should be canceled backwards. For
  1533  // redeemed HTLC's, we should send the pre-image back to the incoming link.
  1534  func (c *ChannelArbitrator) checkCommitChainActions(height uint32,
  1535  	trigger transitionTrigger, htlcs htlcSet) (ChainActionMap, error) {
  1536  
  1537  	// TODO(roasbeef): would need to lock channel? channel totem?
  1538  	//  * race condition if adding and we broadcast, etc
  1539  	//  * or would make each instance sync?
  1540  
  1541  	log.Debugf("ChannelArbitrator(%v): checking commit chain actions at "+
  1542  		"height=%v, in_htlc_count=%v, out_htlc_count=%v",
  1543  		c.cfg.ChanPoint, height,
  1544  		len(htlcs.incomingHTLCs), len(htlcs.outgoingHTLCs))
  1545  
  1546  	actionMap := make(ChainActionMap)
  1547  
  1548  	// First, we'll make an initial pass over the set of incoming and
  1549  	// outgoing HTLC's to decide if we need to go on chain at all.
  1550  	haveChainActions := false
  1551  	for _, htlc := range htlcs.outgoingHTLCs {
  1552  		// We'll need to go on-chain for an outgoing HTLC if it was
  1553  		// never resolved downstream, and it's "close" to timing out.
  1554  		toChain := c.shouldGoOnChain(htlc, c.cfg.OutgoingBroadcastDelta,
  1555  			height,
  1556  		)
  1557  
  1558  		if toChain {
  1559  			log.Debugf("ChannelArbitrator(%v): go to chain for "+
  1560  				"outgoing htlc %x: timeout=%v, "+
  1561  				"blocks_until_expiry=%v, broadcast_delta=%v",
  1562  				c.cfg.ChanPoint, htlc.RHash[:],
  1563  				htlc.RefundTimeout, htlc.RefundTimeout-height,
  1564  				c.cfg.OutgoingBroadcastDelta,
  1565  			)
  1566  		}
  1567  
  1568  		haveChainActions = haveChainActions || toChain
  1569  	}
  1570  
  1571  	for _, htlc := range htlcs.incomingHTLCs {
  1572  		// We'll need to go on-chain to pull an incoming HTLC iff we
  1573  		// know the pre-image and it's close to timing out. We need to
  1574  		// ensure that we claim the funds that our rightfully ours
  1575  		// on-chain.
  1576  		preimageAvailable, err := c.isPreimageAvailable(htlc.RHash)
  1577  		if err != nil {
  1578  			return nil, err
  1579  		}
  1580  
  1581  		if !preimageAvailable {
  1582  			continue
  1583  		}
  1584  
  1585  		toChain := c.shouldGoOnChain(htlc, c.cfg.IncomingBroadcastDelta,
  1586  			height,
  1587  		)
  1588  
  1589  		if toChain {
  1590  			log.Debugf("ChannelArbitrator(%v): go to chain for "+
  1591  				"incoming htlc %x: timeout=%v, "+
  1592  				"blocks_until_expiry=%v, broadcast_delta=%v",
  1593  				c.cfg.ChanPoint, htlc.RHash[:],
  1594  				htlc.RefundTimeout, htlc.RefundTimeout-height,
  1595  				c.cfg.IncomingBroadcastDelta,
  1596  			)
  1597  		}
  1598  
  1599  		haveChainActions = haveChainActions || toChain
  1600  	}
  1601  
  1602  	// If we don't have any actions to make, then we'll return an empty
  1603  	// action map. We only do this if this was a chain trigger though, as
  1604  	// if we're going to broadcast the commitment (or the remote party did)
  1605  	// we're *forced* to act on each HTLC.
  1606  	if !haveChainActions && trigger == chainTrigger {
  1607  		log.Tracef("ChannelArbitrator(%v): no actions to take at "+
  1608  			"height=%v", c.cfg.ChanPoint, height)
  1609  		return actionMap, nil
  1610  	}
  1611  
  1612  	// Now that we know we'll need to go on-chain, we'll examine all of our
  1613  	// active outgoing HTLC's to see if we either need to: sweep them after
  1614  	// a timeout (then cancel backwards), cancel them backwards
  1615  	// immediately, or watch them as they're still active contracts.
  1616  	for _, htlc := range htlcs.outgoingHTLCs {
  1617  		switch {
  1618  		// If the HTLC is dust, then we can cancel it backwards
  1619  		// immediately as there's no matching contract to arbitrate
  1620  		// on-chain. We know the HTLC is dust, if the OutputIndex
  1621  		// negative.
  1622  		case htlc.OutputIndex < 0:
  1623  			log.Tracef("ChannelArbitrator(%v): immediately "+
  1624  				"failing dust htlc=%x", c.cfg.ChanPoint,
  1625  				htlc.RHash[:])
  1626  
  1627  			actionMap[HtlcFailNowAction] = append(
  1628  				actionMap[HtlcFailNowAction], htlc,
  1629  			)
  1630  
  1631  		// If we don't need to immediately act on this HTLC, then we'll
  1632  		// mark it still "live". After we broadcast, we'll monitor it
  1633  		// until the HTLC times out to see if we can also redeem it
  1634  		// on-chain.
  1635  		case !c.shouldGoOnChain(htlc, c.cfg.OutgoingBroadcastDelta,
  1636  			height,
  1637  		):
  1638  			// TODO(roasbeef): also need to be able to query
  1639  			// circuit map to see if HTLC hasn't been fully
  1640  			// resolved
  1641  			//
  1642  			//  * can't fail incoming until if outgoing not yet
  1643  			//  failed
  1644  
  1645  			log.Tracef("ChannelArbitrator(%v): watching chain to "+
  1646  				"decide action for outgoing htlc=%x",
  1647  				c.cfg.ChanPoint, htlc.RHash[:])
  1648  
  1649  			actionMap[HtlcOutgoingWatchAction] = append(
  1650  				actionMap[HtlcOutgoingWatchAction], htlc,
  1651  			)
  1652  
  1653  		// Otherwise, we'll update our actionMap to mark that we need
  1654  		// to sweep this HTLC on-chain
  1655  		default:
  1656  			log.Tracef("ChannelArbitrator(%v): going on-chain to "+
  1657  				"timeout htlc=%x", c.cfg.ChanPoint, htlc.RHash[:])
  1658  
  1659  			actionMap[HtlcTimeoutAction] = append(
  1660  				actionMap[HtlcTimeoutAction], htlc,
  1661  			)
  1662  		}
  1663  	}
  1664  
  1665  	// Similarly, for each incoming HTLC, now that we need to go on-chain,
  1666  	// we'll either: sweep it immediately if we know the pre-image, or
  1667  	// observe the output on-chain if we don't In this last, case we'll
  1668  	// either learn of it eventually from the outgoing HTLC, or the sender
  1669  	// will timeout the HTLC.
  1670  	for _, htlc := range htlcs.incomingHTLCs {
  1671  		// If the HTLC is dust, there is no action to be taken.
  1672  		if htlc.OutputIndex < 0 {
  1673  			log.Debugf("ChannelArbitrator(%v): no resolution "+
  1674  				"needed for incoming dust htlc=%x",
  1675  				c.cfg.ChanPoint, htlc.RHash[:])
  1676  
  1677  			continue
  1678  		}
  1679  
  1680  		log.Tracef("ChannelArbitrator(%v): watching chain to decide "+
  1681  			"action for incoming htlc=%x", c.cfg.ChanPoint,
  1682  			htlc.RHash[:])
  1683  
  1684  		actionMap[HtlcIncomingWatchAction] = append(
  1685  			actionMap[HtlcIncomingWatchAction], htlc,
  1686  		)
  1687  	}
  1688  
  1689  	return actionMap, nil
  1690  }
  1691  
  1692  // isPreimageAvailable returns whether the hash preimage is available in either
  1693  // the preimage cache or the invoice database.
  1694  func (c *ChannelArbitrator) isPreimageAvailable(hash lntypes.Hash) (bool,
  1695  	error) {
  1696  
  1697  	// Start by checking the preimage cache for preimages of
  1698  	// forwarded HTLCs.
  1699  	_, preimageAvailable := c.cfg.PreimageDB.LookupPreimage(
  1700  		hash,
  1701  	)
  1702  	if preimageAvailable {
  1703  		return true, nil
  1704  	}
  1705  
  1706  	// Then check if we have an invoice that can be settled by this HTLC.
  1707  	//
  1708  	// TODO(joostjager): Check that there are still more blocks remaining
  1709  	// than the invoice cltv delta. We don't want to go to chain only to
  1710  	// have the incoming contest resolver decide that we don't want to
  1711  	// settle this invoice.
  1712  	invoice, err := c.cfg.Registry.LookupInvoice(hash)
  1713  	switch err {
  1714  	case nil:
  1715  	case channeldb.ErrInvoiceNotFound, channeldb.ErrNoInvoicesCreated:
  1716  		return false, nil
  1717  	default:
  1718  		return false, err
  1719  	}
  1720  
  1721  	preimageAvailable = invoice.Terms.PaymentPreimage != nil
  1722  
  1723  	return preimageAvailable, nil
  1724  }
  1725  
  1726  // checkLocalChainActions is similar to checkCommitChainActions, but it also
  1727  // examines the set of HTLCs on the remote party's commitment. This allows us
  1728  // to ensure we're able to satisfy the HTLC timeout constraints for incoming vs
  1729  // outgoing HTLCs.
  1730  func (c *ChannelArbitrator) checkLocalChainActions(
  1731  	height uint32, trigger transitionTrigger,
  1732  	activeHTLCs map[HtlcSetKey]htlcSet,
  1733  	commitsConfirmed bool) (ChainActionMap, error) {
  1734  
  1735  	// First, we'll check our local chain actions as normal. This will only
  1736  	// examine HTLCs on our local commitment (timeout or settle).
  1737  	localCommitActions, err := c.checkCommitChainActions(
  1738  		height, trigger, activeHTLCs[LocalHtlcSet],
  1739  	)
  1740  	if err != nil {
  1741  		return nil, err
  1742  	}
  1743  
  1744  	// Next, we'll examine the remote commitment (and maybe a dangling one)
  1745  	// to see if the set difference of our HTLCs is non-empty. If so, then
  1746  	// we may need to cancel back some HTLCs if we decide go to chain.
  1747  	remoteDanglingActions := c.checkRemoteDanglingActions(
  1748  		height, activeHTLCs, commitsConfirmed,
  1749  	)
  1750  
  1751  	// Finally, we'll merge the two set of chain actions.
  1752  	localCommitActions.Merge(remoteDanglingActions)
  1753  
  1754  	return localCommitActions, nil
  1755  }
  1756  
  1757  // checkRemoteDanglingActions examines the set of remote commitments for any
  1758  // HTLCs that are close to timing out. If we find any, then we'll return a set
  1759  // of chain actions for HTLCs that are on our commitment, but not theirs to
  1760  // cancel immediately.
  1761  func (c *ChannelArbitrator) checkRemoteDanglingActions(
  1762  	height uint32, activeHTLCs map[HtlcSetKey]htlcSet,
  1763  	commitsConfirmed bool) ChainActionMap {
  1764  
  1765  	var (
  1766  		pendingRemoteHTLCs []channeldb.HTLC
  1767  		localHTLCs         = make(map[uint64]struct{})
  1768  		remoteHTLCs        = make(map[uint64]channeldb.HTLC)
  1769  		actionMap          = make(ChainActionMap)
  1770  	)
  1771  
  1772  	// First, we'll construct two sets of the outgoing HTLCs: those on our
  1773  	// local commitment, and those that are on the remote commitment(s).
  1774  	for htlcSetKey, htlcs := range activeHTLCs {
  1775  		if htlcSetKey.IsRemote {
  1776  			for _, htlc := range htlcs.outgoingHTLCs {
  1777  				remoteHTLCs[htlc.HtlcIndex] = htlc
  1778  			}
  1779  		} else {
  1780  			for _, htlc := range htlcs.outgoingHTLCs {
  1781  				localHTLCs[htlc.HtlcIndex] = struct{}{}
  1782  			}
  1783  		}
  1784  	}
  1785  
  1786  	// With both sets constructed, we'll now compute the set difference of
  1787  	// our two sets of HTLCs. This'll give us the HTLCs that exist on the
  1788  	// remote commitment transaction, but not on ours.
  1789  	for htlcIndex, htlc := range remoteHTLCs {
  1790  		if _, ok := localHTLCs[htlcIndex]; ok {
  1791  			continue
  1792  		}
  1793  
  1794  		pendingRemoteHTLCs = append(pendingRemoteHTLCs, htlc)
  1795  	}
  1796  
  1797  	// Finally, we'll examine all the pending remote HTLCs for those that
  1798  	// have expired. If we find any, then we'll recommend that they be
  1799  	// failed now so we can free up the incoming HTLC.
  1800  	for _, htlc := range pendingRemoteHTLCs {
  1801  		// We'll now check if we need to go to chain in order to cancel
  1802  		// the incoming HTLC.
  1803  		goToChain := c.shouldGoOnChain(htlc, c.cfg.OutgoingBroadcastDelta,
  1804  			height,
  1805  		)
  1806  
  1807  		// If we don't need to go to chain, and no commitments have
  1808  		// been confirmed, then we can move on. Otherwise, if
  1809  		// commitments have been confirmed, then we need to cancel back
  1810  		// *all* of the pending remote HTLCS.
  1811  		if !goToChain && !commitsConfirmed {
  1812  			continue
  1813  		}
  1814  
  1815  		log.Tracef("ChannelArbitrator(%v): immediately failing "+
  1816  			"htlc=%x from remote commitment",
  1817  			c.cfg.ChanPoint, htlc.RHash[:])
  1818  
  1819  		actionMap[HtlcFailNowAction] = append(
  1820  			actionMap[HtlcFailNowAction], htlc,
  1821  		)
  1822  	}
  1823  
  1824  	return actionMap
  1825  }
  1826  
  1827  // checkRemoteChainActions examines the two possible remote commitment chains
  1828  // and returns the set of chain actions we need to carry out if the remote
  1829  // commitment (non pending) confirms. The pendingConf indicates if the pending
  1830  // remote commitment confirmed. This is similar to checkCommitChainActions, but
  1831  // we'll immediately fail any HTLCs on the pending remote commit, but not the
  1832  // remote commit (or the other way around).
  1833  func (c *ChannelArbitrator) checkRemoteChainActions(
  1834  	height uint32, trigger transitionTrigger,
  1835  	activeHTLCs map[HtlcSetKey]htlcSet,
  1836  	pendingConf bool) (ChainActionMap, error) {
  1837  
  1838  	// First, we'll examine all the normal chain actions on the remote
  1839  	// commitment that confirmed.
  1840  	confHTLCs := activeHTLCs[RemoteHtlcSet]
  1841  	if pendingConf {
  1842  		confHTLCs = activeHTLCs[RemotePendingHtlcSet]
  1843  	}
  1844  	remoteCommitActions, err := c.checkCommitChainActions(
  1845  		height, trigger, confHTLCs,
  1846  	)
  1847  	if err != nil {
  1848  		return nil, err
  1849  	}
  1850  
  1851  	// With this actions computed, we'll now check the diff of the HTLCs on
  1852  	// the commitments, and cancel back any that are on the pending but not
  1853  	// the non-pending.
  1854  	remoteDiffActions := c.checkRemoteDiffActions(
  1855  		height, activeHTLCs, pendingConf,
  1856  	)
  1857  
  1858  	// Finally, we'll merge all the chain actions and the final set of
  1859  	// chain actions.
  1860  	remoteCommitActions.Merge(remoteDiffActions)
  1861  	return remoteCommitActions, nil
  1862  }
  1863  
  1864  // checkRemoteDiffActions checks the set difference of the HTLCs on the remote
  1865  // confirmed commit and remote dangling commit for HTLCS that we need to cancel
  1866  // back. If we find any HTLCs on the remote pending but not the remote, then
  1867  // we'll mark them to be failed immediately.
  1868  func (c *ChannelArbitrator) checkRemoteDiffActions(height uint32,
  1869  	activeHTLCs map[HtlcSetKey]htlcSet,
  1870  	pendingConf bool) ChainActionMap {
  1871  
  1872  	// First, we'll partition the HTLCs into those that are present on the
  1873  	// confirmed commitment, and those on the dangling commitment.
  1874  	confHTLCs := activeHTLCs[RemoteHtlcSet]
  1875  	danglingHTLCs := activeHTLCs[RemotePendingHtlcSet]
  1876  	if pendingConf {
  1877  		confHTLCs = activeHTLCs[RemotePendingHtlcSet]
  1878  		danglingHTLCs = activeHTLCs[RemoteHtlcSet]
  1879  	}
  1880  
  1881  	// Next, we'll create a set of all the HTLCs confirmed commitment.
  1882  	remoteHtlcs := make(map[uint64]struct{})
  1883  	for _, htlc := range confHTLCs.outgoingHTLCs {
  1884  		remoteHtlcs[htlc.HtlcIndex] = struct{}{}
  1885  	}
  1886  
  1887  	// With the remote HTLCs assembled, we'll mark any HTLCs only on the
  1888  	// remote dangling commitment to be failed asap.
  1889  	actionMap := make(ChainActionMap)
  1890  	for _, htlc := range danglingHTLCs.outgoingHTLCs {
  1891  		if _, ok := remoteHtlcs[htlc.HtlcIndex]; ok {
  1892  			continue
  1893  		}
  1894  
  1895  		actionMap[HtlcFailNowAction] = append(
  1896  			actionMap[HtlcFailNowAction], htlc,
  1897  		)
  1898  
  1899  		log.Tracef("ChannelArbitrator(%v): immediately failing "+
  1900  			"htlc=%x from remote commitment",
  1901  			c.cfg.ChanPoint, htlc.RHash[:])
  1902  	}
  1903  
  1904  	return actionMap
  1905  }
  1906  
  1907  // constructChainActions returns the set of actions that should be taken for
  1908  // confirmed HTLCs at the specified height. Our actions will depend on the set
  1909  // of HTLCs that were active across all channels at the time of channel
  1910  // closure.
  1911  func (c *ChannelArbitrator) constructChainActions(confCommitSet *CommitSet,
  1912  	height uint32, trigger transitionTrigger) (ChainActionMap, error) {
  1913  
  1914  	// If we've reached this point and have not confirmed commitment set,
  1915  	// then this is an older node that had a pending close channel before
  1916  	// the CommitSet was introduced. In this case, we'll just return the
  1917  	// existing ChainActionMap they had on disk.
  1918  	if confCommitSet == nil {
  1919  		return c.log.FetchChainActions()
  1920  	}
  1921  
  1922  	// Otherwise we have the full commitment set written to disk, and can
  1923  	// proceed as normal.
  1924  	htlcSets := confCommitSet.toActiveHTLCSets()
  1925  	switch *confCommitSet.ConfCommitKey {
  1926  
  1927  	// If the local commitment transaction confirmed, then we'll examine
  1928  	// that as well as their commitments to the set of chain actions.
  1929  	case LocalHtlcSet:
  1930  		return c.checkLocalChainActions(
  1931  			height, trigger, htlcSets, true,
  1932  		)
  1933  
  1934  	// If the remote commitment confirmed, then we'll grab all the chain
  1935  	// actions for the remote commit, and check the pending commit for any
  1936  	// HTLCS we need to handle immediately (dust).
  1937  	case RemoteHtlcSet:
  1938  		return c.checkRemoteChainActions(
  1939  			height, trigger, htlcSets, false,
  1940  		)
  1941  
  1942  	// Otherwise, the remote pending commitment confirmed, so we'll examine
  1943  	// the HTLCs on that unrevoked dangling commitment.
  1944  	case RemotePendingHtlcSet:
  1945  		return c.checkRemoteChainActions(
  1946  			height, trigger, htlcSets, true,
  1947  		)
  1948  	}
  1949  
  1950  	return nil, fmt.Errorf("unable to locate chain actions")
  1951  }
  1952  
  1953  // prepContractResolutions is called either int he case that we decide we need
  1954  // to go to chain, or the remote party goes to chain. Given a set of actions we
  1955  // need to take for each HTLC, this method will return a set of contract
  1956  // resolvers that will resolve the contracts on-chain if needed, and also a set
  1957  // of packets to send to the htlcswitch in order to ensure all incoming HTLC's
  1958  // are properly resolved.
  1959  func (c *ChannelArbitrator) prepContractResolutions(
  1960  	contractResolutions *ContractResolutions, height uint32,
  1961  	trigger transitionTrigger,
  1962  	confCommitSet *CommitSet) ([]ContractResolver, []ResolutionMsg, error) {
  1963  
  1964  	// First, we'll reconstruct a fresh set of chain actions as the set of
  1965  	// actions we need to act on may differ based on if it was our
  1966  	// commitment, or they're commitment that hit the chain.
  1967  	htlcActions, err := c.constructChainActions(
  1968  		confCommitSet, height, trigger,
  1969  	)
  1970  	if err != nil {
  1971  		return nil, nil, err
  1972  	}
  1973  
  1974  	// We'll also fetch the historical state of this channel, as it should
  1975  	// have been marked as closed by now, and supplement it to each resolver
  1976  	// such that we can properly resolve our pending contracts.
  1977  	var chanState *channeldb.OpenChannel
  1978  	chanState, err = c.cfg.FetchHistoricalChannel()
  1979  	switch {
  1980  	// If we don't find this channel, then it may be the case that it
  1981  	// was closed before we started to retain the final state
  1982  	// information for open channels.
  1983  	case err == channeldb.ErrNoHistoricalBucket:
  1984  		fallthrough
  1985  	case err == channeldb.ErrChannelNotFound:
  1986  		log.Warnf("ChannelArbitrator(%v): unable to fetch historical "+
  1987  			"state", c.cfg.ChanPoint)
  1988  
  1989  	case err != nil:
  1990  		return nil, nil, err
  1991  	}
  1992  
  1993  	// There may be a class of HTLC's which we can fail back immediately,
  1994  	// for those we'll prepare a slice of packets to add to our outbox. Any
  1995  	// packets we need to send, will be cancels.
  1996  	var (
  1997  		msgsToSend []ResolutionMsg
  1998  	)
  1999  
  2000  	incomingResolutions := contractResolutions.HtlcResolutions.IncomingHTLCs
  2001  	outgoingResolutions := contractResolutions.HtlcResolutions.OutgoingHTLCs
  2002  
  2003  	// We'll use these two maps to quickly look up an active HTLC with its
  2004  	// matching HTLC resolution.
  2005  	outResolutionMap := make(map[wire.OutPoint]lnwallet.OutgoingHtlcResolution)
  2006  	inResolutionMap := make(map[wire.OutPoint]lnwallet.IncomingHtlcResolution)
  2007  	for i := 0; i < len(incomingResolutions); i++ {
  2008  		inRes := incomingResolutions[i]
  2009  		inResolutionMap[inRes.HtlcPoint()] = inRes
  2010  	}
  2011  	for i := 0; i < len(outgoingResolutions); i++ {
  2012  		outRes := outgoingResolutions[i]
  2013  		outResolutionMap[outRes.HtlcPoint()] = outRes
  2014  	}
  2015  
  2016  	// We'll create the resolver kit that we'll be cloning for each
  2017  	// resolver so they each can do their duty.
  2018  	resolverCfg := ResolverConfig{
  2019  		ChannelArbitratorConfig: c.cfg,
  2020  		Checkpoint: func(res ContractResolver,
  2021  			reports ...*channeldb.ResolverReport) error {
  2022  
  2023  			return c.log.InsertUnresolvedContracts(reports, res)
  2024  		},
  2025  	}
  2026  
  2027  	commitHash := contractResolutions.CommitHash
  2028  	failureMsg := &lnwire.FailPermanentChannelFailure{}
  2029  
  2030  	var htlcResolvers []ContractResolver
  2031  
  2032  	// We instantiate an anchor resolver if the commitment tx has an
  2033  	// anchor.
  2034  	if contractResolutions.AnchorResolution != nil {
  2035  		anchorResolver := newAnchorResolver(
  2036  			contractResolutions.AnchorResolution.AnchorSignDescriptor,
  2037  			contractResolutions.AnchorResolution.CommitAnchor,
  2038  			height, c.cfg.ChanPoint, resolverCfg,
  2039  		)
  2040  		htlcResolvers = append(htlcResolvers, anchorResolver)
  2041  	}
  2042  
  2043  	// If this is a breach close, we'll create a breach resolver, determine
  2044  	// the htlc's to fail back, and exit. This is done because the other
  2045  	// steps taken for non-breach-closes do not matter for breach-closes.
  2046  	if contractResolutions.BreachResolution != nil {
  2047  		breachResolver := newBreachResolver(resolverCfg)
  2048  		htlcResolvers = append(htlcResolvers, breachResolver)
  2049  
  2050  		// We'll use the CommitSet, we'll fail back all outgoing HTLC's
  2051  		// that exist on either of the remote commitments. The map is
  2052  		// used to deduplicate any shared htlc's.
  2053  		remoteOutgoing := make(map[uint64]channeldb.HTLC)
  2054  		for htlcSetKey, htlcs := range confCommitSet.HtlcSets {
  2055  			if !htlcSetKey.IsRemote {
  2056  				continue
  2057  			}
  2058  
  2059  			for _, htlc := range htlcs {
  2060  				if htlc.Incoming {
  2061  					continue
  2062  				}
  2063  
  2064  				remoteOutgoing[htlc.HtlcIndex] = htlc
  2065  			}
  2066  		}
  2067  
  2068  		// Now we'll loop over the map and create ResolutionMsgs for
  2069  		// each of them.
  2070  		for _, htlc := range remoteOutgoing {
  2071  			failMsg := ResolutionMsg{
  2072  				SourceChan: c.cfg.ShortChanID,
  2073  				HtlcIndex:  htlc.HtlcIndex,
  2074  				Failure:    failureMsg,
  2075  			}
  2076  
  2077  			msgsToSend = append(msgsToSend, failMsg)
  2078  		}
  2079  
  2080  		return htlcResolvers, msgsToSend, nil
  2081  	}
  2082  
  2083  	// For each HTLC, we'll either act immediately, meaning we'll instantly
  2084  	// fail the HTLC, or we'll act only once the transaction has been
  2085  	// confirmed, in which case we'll need an HTLC resolver.
  2086  	for htlcAction, htlcs := range htlcActions {
  2087  		switch htlcAction {
  2088  
  2089  		// If we can fail an HTLC immediately (an outgoing HTLC with no
  2090  		// contract), then we'll assemble an HTLC fail packet to send.
  2091  		case HtlcFailNowAction:
  2092  			for _, htlc := range htlcs {
  2093  				failMsg := ResolutionMsg{
  2094  					SourceChan: c.cfg.ShortChanID,
  2095  					HtlcIndex:  htlc.HtlcIndex,
  2096  					Failure:    failureMsg,
  2097  				}
  2098  
  2099  				msgsToSend = append(msgsToSend, failMsg)
  2100  			}
  2101  
  2102  		// If we can claim this HTLC, we'll create an HTLC resolver to
  2103  		// claim the HTLC (second-level or directly), then add the pre
  2104  		case HtlcClaimAction:
  2105  			for _, htlc := range htlcs {
  2106  				htlc := htlc
  2107  
  2108  				htlcOp := wire.OutPoint{
  2109  					Hash:  commitHash,
  2110  					Index: uint32(htlc.OutputIndex),
  2111  				}
  2112  
  2113  				resolution, ok := inResolutionMap[htlcOp]
  2114  				if !ok {
  2115  					// TODO(roasbeef): panic?
  2116  					log.Errorf("ChannelArbitrator(%v) unable to find "+
  2117  						"incoming resolution: %v",
  2118  						c.cfg.ChanPoint, htlcOp)
  2119  					continue
  2120  				}
  2121  
  2122  				resolver := newSuccessResolver(
  2123  					resolution, height, htlc, resolverCfg,
  2124  				)
  2125  				htlcResolvers = append(htlcResolvers, resolver)
  2126  			}
  2127  
  2128  		// If we can timeout the HTLC directly, then we'll create the
  2129  		// proper resolver to do so, who will then cancel the packet
  2130  		// backwards.
  2131  		case HtlcTimeoutAction:
  2132  			for _, htlc := range htlcs {
  2133  				htlc := htlc
  2134  
  2135  				htlcOp := wire.OutPoint{
  2136  					Hash:  commitHash,
  2137  					Index: uint32(htlc.OutputIndex),
  2138  				}
  2139  
  2140  				resolution, ok := outResolutionMap[htlcOp]
  2141  				if !ok {
  2142  					log.Errorf("ChannelArbitrator(%v) unable to find "+
  2143  						"outgoing resolution: %v", c.cfg.ChanPoint, htlcOp)
  2144  					continue
  2145  				}
  2146  
  2147  				resolver := newTimeoutResolver(
  2148  					resolution, height, htlc, resolverCfg,
  2149  				)
  2150  				if chanState != nil {
  2151  					resolver.SupplementState(chanState)
  2152  				}
  2153  				htlcResolvers = append(htlcResolvers, resolver)
  2154  			}
  2155  
  2156  		// If this is an incoming HTLC, but we can't act yet, then
  2157  		// we'll create an incoming resolver to redeem the HTLC if we
  2158  		// learn of the pre-image, or let the remote party time out.
  2159  		case HtlcIncomingWatchAction:
  2160  			for _, htlc := range htlcs {
  2161  				htlc := htlc
  2162  
  2163  				htlcOp := wire.OutPoint{
  2164  					Hash:  commitHash,
  2165  					Index: uint32(htlc.OutputIndex),
  2166  				}
  2167  
  2168  				// TODO(roasbeef): need to handle incoming dust...
  2169  
  2170  				// TODO(roasbeef): can't be negative!!!
  2171  				resolution, ok := inResolutionMap[htlcOp]
  2172  				if !ok {
  2173  					log.Errorf("ChannelArbitrator(%v) unable to find "+
  2174  						"incoming resolution: %v",
  2175  						c.cfg.ChanPoint, htlcOp)
  2176  					continue
  2177  				}
  2178  
  2179  				resolver := newIncomingContestResolver(
  2180  					resolution, height, htlc,
  2181  					resolverCfg,
  2182  				)
  2183  				htlcResolvers = append(htlcResolvers, resolver)
  2184  			}
  2185  
  2186  		// Finally, if this is an outgoing HTLC we've sent, then we'll
  2187  		// launch a resolver to watch for the pre-image (and settle
  2188  		// backwards), or just timeout.
  2189  		case HtlcOutgoingWatchAction:
  2190  			for _, htlc := range htlcs {
  2191  				htlc := htlc
  2192  
  2193  				htlcOp := wire.OutPoint{
  2194  					Hash:  commitHash,
  2195  					Index: uint32(htlc.OutputIndex),
  2196  				}
  2197  
  2198  				resolution, ok := outResolutionMap[htlcOp]
  2199  				if !ok {
  2200  					log.Errorf("ChannelArbitrator(%v) unable to find "+
  2201  						"outgoing resolution: %v",
  2202  						c.cfg.ChanPoint, htlcOp)
  2203  					continue
  2204  				}
  2205  
  2206  				resolver := newOutgoingContestResolver(
  2207  					resolution, height, htlc, resolverCfg,
  2208  				)
  2209  				if chanState != nil {
  2210  					resolver.SupplementState(chanState)
  2211  				}
  2212  				htlcResolvers = append(htlcResolvers, resolver)
  2213  			}
  2214  		}
  2215  	}
  2216  
  2217  	// If this is was an unilateral closure, then we'll also create a
  2218  	// resolver to sweep our commitment output (but only if it wasn't
  2219  	// trimmed).
  2220  	if contractResolutions.CommitResolution != nil {
  2221  		resolver := newCommitSweepResolver(
  2222  			*contractResolutions.CommitResolution, height,
  2223  			c.cfg.ChanPoint, resolverCfg,
  2224  		)
  2225  		if chanState != nil {
  2226  			resolver.SupplementState(chanState)
  2227  		}
  2228  		htlcResolvers = append(htlcResolvers, resolver)
  2229  	}
  2230  
  2231  	return htlcResolvers, msgsToSend, nil
  2232  }
  2233  
  2234  // replaceResolver replaces a in the list of active resolvers. If the resolver
  2235  // to be replaced is not found, it returns an error.
  2236  func (c *ChannelArbitrator) replaceResolver(oldResolver,
  2237  	newResolver ContractResolver) error {
  2238  
  2239  	c.activeResolversLock.Lock()
  2240  	defer c.activeResolversLock.Unlock()
  2241  
  2242  	oldKey := oldResolver.ResolverKey()
  2243  	for i, r := range c.activeResolvers {
  2244  		if bytes.Equal(r.ResolverKey(), oldKey) {
  2245  			c.activeResolvers[i] = newResolver
  2246  			return nil
  2247  		}
  2248  	}
  2249  
  2250  	return errors.New("resolver to be replaced not found")
  2251  }
  2252  
  2253  // resolveContract is a goroutine tasked with fully resolving an unresolved
  2254  // contract. Either the initial contract will be resolved after a single step,
  2255  // or the contract will itself create another contract to be resolved. In
  2256  // either case, one the contract has been fully resolved, we'll signal back to
  2257  // the main goroutine so it can properly keep track of the set of unresolved
  2258  // contracts.
  2259  //
  2260  // NOTE: This MUST be run as a goroutine.
  2261  func (c *ChannelArbitrator) resolveContract(currentContract ContractResolver) {
  2262  	defer c.wg.Done()
  2263  
  2264  	log.Debugf("ChannelArbitrator(%v): attempting to resolve %T",
  2265  		c.cfg.ChanPoint, currentContract)
  2266  
  2267  	// Until the contract is fully resolved, we'll continue to iteratively
  2268  	// resolve the contract one step at a time.
  2269  	for !currentContract.IsResolved() {
  2270  		log.Debugf("ChannelArbitrator(%v): contract %T not yet resolved",
  2271  			c.cfg.ChanPoint, currentContract)
  2272  
  2273  		select {
  2274  
  2275  		// If we've been signalled to quit, then we'll exit early.
  2276  		case <-c.quit:
  2277  			return
  2278  
  2279  		default:
  2280  			// Otherwise, we'll attempt to resolve the current
  2281  			// contract.
  2282  			nextContract, err := currentContract.Resolve()
  2283  			if err != nil {
  2284  				if err == errResolverShuttingDown {
  2285  					return
  2286  				}
  2287  
  2288  				log.Errorf("ChannelArbitrator(%v): unable to "+
  2289  					"progress %T: %v",
  2290  					c.cfg.ChanPoint, currentContract, err)
  2291  				return
  2292  			}
  2293  
  2294  			switch {
  2295  			// If this contract produced another, then this means
  2296  			// the current contract was only able to be partially
  2297  			// resolved in this step. So we'll do a contract swap
  2298  			// within our logs: the new contract will take the
  2299  			// place of the old one.
  2300  			case nextContract != nil:
  2301  				log.Debugf("ChannelArbitrator(%v): swapping "+
  2302  					"out contract %T for %T ",
  2303  					c.cfg.ChanPoint, currentContract,
  2304  					nextContract)
  2305  
  2306  				// Swap contract in log.
  2307  				err := c.log.SwapContract(
  2308  					currentContract, nextContract,
  2309  				)
  2310  				if err != nil {
  2311  					log.Errorf("unable to add recurse "+
  2312  						"contract: %v", err)
  2313  				}
  2314  
  2315  				// Swap contract in resolvers list. This is to
  2316  				// make sure that reports are queried from the
  2317  				// new resolver.
  2318  				err = c.replaceResolver(
  2319  					currentContract, nextContract,
  2320  				)
  2321  				if err != nil {
  2322  					log.Errorf("unable to replace "+
  2323  						"contract: %v", err)
  2324  				}
  2325  
  2326  				// As this contract produced another, we'll
  2327  				// re-assign, so we can continue our resolution
  2328  				// loop.
  2329  				currentContract = nextContract
  2330  
  2331  			// If this contract is actually fully resolved, then
  2332  			// we'll mark it as such within the database.
  2333  			case currentContract.IsResolved():
  2334  				log.Debugf("ChannelArbitrator(%v): marking "+
  2335  					"contract %T fully resolved",
  2336  					c.cfg.ChanPoint, currentContract)
  2337  
  2338  				err := c.log.ResolveContract(currentContract)
  2339  				if err != nil {
  2340  					log.Errorf("unable to resolve contract: %v",
  2341  						err)
  2342  				}
  2343  
  2344  				// Now that the contract has been resolved,
  2345  				// well signal to the main goroutine.
  2346  				select {
  2347  				case c.resolutionSignal <- struct{}{}:
  2348  				case <-c.quit:
  2349  					return
  2350  				}
  2351  			}
  2352  
  2353  		}
  2354  	}
  2355  }
  2356  
  2357  // signalUpdateMsg is a struct that carries fresh signals to the
  2358  // ChannelArbitrator. We need to receive a message like this each time the
  2359  // channel becomes active, as it's internal state may change.
  2360  type signalUpdateMsg struct {
  2361  	// newSignals is the set of new active signals to be sent to the
  2362  	// arbitrator.
  2363  	newSignals *ContractSignals
  2364  
  2365  	// doneChan is a channel that will be closed on the arbitrator has
  2366  	// attached the new signals.
  2367  	doneChan chan struct{}
  2368  }
  2369  
  2370  // UpdateContractSignals updates the set of signals the ChannelArbitrator needs
  2371  // to receive from a channel in real-time in order to keep in sync with the
  2372  // latest state of the contract.
  2373  func (c *ChannelArbitrator) UpdateContractSignals(newSignals *ContractSignals) {
  2374  	done := make(chan struct{})
  2375  
  2376  	select {
  2377  	case c.signalUpdates <- &signalUpdateMsg{
  2378  		newSignals: newSignals,
  2379  		doneChan:   done,
  2380  	}:
  2381  	case <-c.quit:
  2382  	}
  2383  
  2384  	select {
  2385  	case <-done:
  2386  	case <-c.quit:
  2387  	}
  2388  }
  2389  
  2390  // channelAttendant is the primary goroutine that acts at the judicial
  2391  // arbitrator between our channel state, the remote channel peer, and the
  2392  // blockchain (Our judge). This goroutine will ensure that we faithfully execute
  2393  // all clauses of our contract in the case that we need to go on-chain for a
  2394  // dispute. Currently, two such conditions warrant our intervention: when an
  2395  // outgoing HTLC is about to timeout, and when we know the pre-image for an
  2396  // incoming HTLC, but it hasn't yet been settled off-chain. In these cases,
  2397  // we'll: broadcast our commitment, cancel/settle any HTLC's backwards after
  2398  // sufficient confirmation, and finally send our set of outputs to the UTXO
  2399  // Nursery for incubation, and ultimate sweeping.
  2400  //
  2401  // NOTE: This MUST be run as a goroutine.
  2402  func (c *ChannelArbitrator) channelAttendant(bestHeight int32) {
  2403  
  2404  	// TODO(roasbeef): tell top chain arb we're done
  2405  	defer func() {
  2406  		c.wg.Done()
  2407  	}()
  2408  
  2409  	for {
  2410  		select {
  2411  
  2412  		// A new block has arrived, we'll examine all the active HTLC's
  2413  		// to see if any of them have expired, and also update our
  2414  		// track of the best current height.
  2415  		case blockHeight, ok := <-c.blocks:
  2416  			if !ok {
  2417  				return
  2418  			}
  2419  			bestHeight = blockHeight
  2420  
  2421  			// If we're not in the default state, then we can
  2422  			// ignore this signal as we're waiting for contract
  2423  			// resolution.
  2424  			if c.state != StateDefault {
  2425  				continue
  2426  			}
  2427  
  2428  			// Now that a new block has arrived, we'll attempt to
  2429  			// advance our state forward.
  2430  			nextState, _, err := c.advanceState(
  2431  				uint32(bestHeight), chainTrigger, nil,
  2432  			)
  2433  			if err != nil {
  2434  				log.Errorf("Unable to advance state: %v", err)
  2435  			}
  2436  
  2437  			// If as a result of this trigger, the contract is
  2438  			// fully resolved, then well exit.
  2439  			if nextState == StateFullyResolved {
  2440  				return
  2441  			}
  2442  
  2443  		// A new signal update was just sent. This indicates that the
  2444  		// channel under watch is now live, and may modify its internal
  2445  		// state, so we'll get the most up to date signals to we can
  2446  		// properly do our job.
  2447  		case signalUpdate := <-c.signalUpdates:
  2448  			log.Tracef("ChannelArbitrator(%v) got new signal "+
  2449  				"update!", c.cfg.ChanPoint)
  2450  
  2451  			// First, we'll update our set of signals.
  2452  			c.htlcUpdates = signalUpdate.newSignals.HtlcUpdates
  2453  			c.cfg.ShortChanID = signalUpdate.newSignals.ShortChanID
  2454  
  2455  			// Now that the signals have been updated, we'll now
  2456  			// close the done channel to signal to the caller we've
  2457  			// registered the new contracts.
  2458  			close(signalUpdate.doneChan)
  2459  
  2460  		// A new set of HTLC's has been added or removed from the
  2461  		// commitment transaction. So we'll update our activeHTLCs map
  2462  		// accordingly.
  2463  		case htlcUpdate := <-c.htlcUpdates:
  2464  			// We'll wipe out our old set of HTLC's for each
  2465  			// htlcSetKey type included in this update in order to
  2466  			// only monitor the HTLCs that are still active on this
  2467  			// target commitment.
  2468  			c.activeHTLCs[htlcUpdate.HtlcKey] = newHtlcSet(
  2469  				htlcUpdate.Htlcs,
  2470  			)
  2471  
  2472  			log.Tracef("ChannelArbitrator(%v): fresh set of htlcs=%v",
  2473  				c.cfg.ChanPoint,
  2474  				newLogClosure(func() string {
  2475  					return spew.Sdump(htlcUpdate)
  2476  				}),
  2477  			)
  2478  
  2479  		// We've cooperatively closed the channel, so we're no longer
  2480  		// needed. We'll mark the channel as resolved and exit.
  2481  		case closeInfo := <-c.cfg.ChainEvents.CooperativeClosure:
  2482  			log.Infof("ChannelArbitrator(%v) marking channel "+
  2483  				"cooperatively closed", c.cfg.ChanPoint)
  2484  
  2485  			err := c.cfg.MarkChannelClosed(
  2486  				closeInfo.ChannelCloseSummary,
  2487  				channeldb.ChanStatusCoopBroadcasted,
  2488  			)
  2489  			if err != nil {
  2490  				log.Errorf("Unable to mark channel closed: "+
  2491  					"%v", err)
  2492  				return
  2493  			}
  2494  
  2495  			// We'll now advance our state machine until it reaches
  2496  			// a terminal state, and the channel is marked resolved.
  2497  			_, _, err = c.advanceState(
  2498  				closeInfo.CloseHeight, coopCloseTrigger, nil,
  2499  			)
  2500  			if err != nil {
  2501  				log.Errorf("Unable to advance state: %v", err)
  2502  				return
  2503  			}
  2504  
  2505  		// We have broadcasted our commitment, and it is now confirmed
  2506  		// on-chain.
  2507  		case closeInfo := <-c.cfg.ChainEvents.LocalUnilateralClosure:
  2508  			log.Infof("ChannelArbitrator(%v): local on-chain "+
  2509  				"channel close", c.cfg.ChanPoint)
  2510  
  2511  			if c.state != StateCommitmentBroadcasted {
  2512  				log.Errorf("ChannelArbitrator(%v): unexpected "+
  2513  					"local on-chain channel close",
  2514  					c.cfg.ChanPoint)
  2515  			}
  2516  			closeTx := closeInfo.CloseTx
  2517  
  2518  			contractRes := &ContractResolutions{
  2519  				CommitHash:       closeTx.TxHash(),
  2520  				CommitResolution: closeInfo.CommitResolution,
  2521  				HtlcResolutions:  *closeInfo.HtlcResolutions,
  2522  				AnchorResolution: closeInfo.AnchorResolution,
  2523  			}
  2524  
  2525  			// When processing a unilateral close event, we'll
  2526  			// transition to the ContractClosed state. We'll log
  2527  			// out the set of resolutions such that they are
  2528  			// available to fetch in that state, we'll also write
  2529  			// the commit set so we can reconstruct our chain
  2530  			// actions on restart.
  2531  			err := c.log.LogContractResolutions(contractRes)
  2532  			if err != nil {
  2533  				log.Errorf("Unable to write resolutions: %v",
  2534  					err)
  2535  				return
  2536  			}
  2537  			err = c.log.InsertConfirmedCommitSet(
  2538  				&closeInfo.CommitSet,
  2539  			)
  2540  			if err != nil {
  2541  				log.Errorf("Unable to write commit set: %v",
  2542  					err)
  2543  				return
  2544  			}
  2545  
  2546  			// After the set of resolutions are successfully
  2547  			// logged, we can safely close the channel. After this
  2548  			// succeeds we won't be getting chain events anymore,
  2549  			// so we must make sure we can recover on restart after
  2550  			// it is marked closed. If the next state transition
  2551  			// fails, we'll start up in the prior state again, and
  2552  			// we won't be longer getting chain events. In this
  2553  			// case we must manually re-trigger the state
  2554  			// transition into StateContractClosed based on the
  2555  			// close status of the channel.
  2556  			err = c.cfg.MarkChannelClosed(
  2557  				closeInfo.ChannelCloseSummary,
  2558  				channeldb.ChanStatusLocalCloseInitiator,
  2559  			)
  2560  			if err != nil {
  2561  				log.Errorf("Unable to mark "+
  2562  					"channel closed: %v", err)
  2563  				return
  2564  			}
  2565  
  2566  			// We'll now advance our state machine until it reaches
  2567  			// a terminal state.
  2568  			_, _, err = c.advanceState(
  2569  				uint32(closeInfo.SpendingHeight),
  2570  				localCloseTrigger, &closeInfo.CommitSet,
  2571  			)
  2572  			if err != nil {
  2573  				log.Errorf("Unable to advance state: %v", err)
  2574  			}
  2575  
  2576  		// The remote party has broadcast the commitment on-chain.
  2577  		// We'll examine our state to determine if we need to act at
  2578  		// all.
  2579  		case uniClosure := <-c.cfg.ChainEvents.RemoteUnilateralClosure:
  2580  			log.Infof("ChannelArbitrator(%v): remote party has "+
  2581  				"closed channel out on-chain", c.cfg.ChanPoint)
  2582  
  2583  			// If we don't have a self output, and there are no
  2584  			// active HTLC's, then we can immediately mark the
  2585  			// contract as fully resolved and exit.
  2586  			contractRes := &ContractResolutions{
  2587  				CommitHash:       *uniClosure.SpenderTxHash,
  2588  				CommitResolution: uniClosure.CommitResolution,
  2589  				HtlcResolutions:  *uniClosure.HtlcResolutions,
  2590  				AnchorResolution: uniClosure.AnchorResolution,
  2591  			}
  2592  
  2593  			// When processing a unilateral close event, we'll
  2594  			// transition to the ContractClosed state. We'll log
  2595  			// out the set of resolutions such that they are
  2596  			// available to fetch in that state, we'll also write
  2597  			// the commit set so we can reconstruct our chain
  2598  			// actions on restart.
  2599  			err := c.log.LogContractResolutions(contractRes)
  2600  			if err != nil {
  2601  				log.Errorf("Unable to write resolutions: %v",
  2602  					err)
  2603  				return
  2604  			}
  2605  			err = c.log.InsertConfirmedCommitSet(
  2606  				&uniClosure.CommitSet,
  2607  			)
  2608  			if err != nil {
  2609  				log.Errorf("Unable to write commit set: %v",
  2610  					err)
  2611  				return
  2612  			}
  2613  
  2614  			// After the set of resolutions are successfully
  2615  			// logged, we can safely close the channel. After this
  2616  			// succeeds we won't be getting chain events anymore,
  2617  			// so we must make sure we can recover on restart after
  2618  			// it is marked closed. If the next state transition
  2619  			// fails, we'll start up in the prior state again, and
  2620  			// we won't be longer getting chain events. In this
  2621  			// case we must manually re-trigger the state
  2622  			// transition into StateContractClosed based on the
  2623  			// close status of the channel.
  2624  			closeSummary := &uniClosure.ChannelCloseSummary
  2625  			err = c.cfg.MarkChannelClosed(
  2626  				closeSummary,
  2627  				channeldb.ChanStatusRemoteCloseInitiator,
  2628  			)
  2629  			if err != nil {
  2630  				log.Errorf("Unable to mark channel closed: %v",
  2631  					err)
  2632  				return
  2633  			}
  2634  
  2635  			// We'll now advance our state machine until it reaches
  2636  			// a terminal state.
  2637  			_, _, err = c.advanceState(
  2638  				uint32(uniClosure.SpendingHeight),
  2639  				remoteCloseTrigger, &uniClosure.CommitSet,
  2640  			)
  2641  			if err != nil {
  2642  				log.Errorf("Unable to advance state: %v", err)
  2643  			}
  2644  
  2645  		// The remote has breached the channel. As this is handled by
  2646  		// the ChainWatcher and BreachArbiter, we don't have to do
  2647  		// anything in particular, so just advance our state and
  2648  		// gracefully exit.
  2649  		case breachInfo := <-c.cfg.ChainEvents.ContractBreach:
  2650  			log.Infof("ChannelArbitrator(%v): remote party has "+
  2651  				"breached channel!", c.cfg.ChanPoint)
  2652  
  2653  			// In the breach case, we'll only have anchor and
  2654  			// breach resolutions.
  2655  			contractRes := &ContractResolutions{
  2656  				CommitHash:       breachInfo.CommitHash,
  2657  				BreachResolution: breachInfo.BreachResolution,
  2658  				AnchorResolution: breachInfo.AnchorResolution,
  2659  			}
  2660  
  2661  			// We'll transition to the ContractClosed state and log
  2662  			// the set of resolutions such that they can be turned
  2663  			// into resolvers later on. We'll also insert the
  2664  			// CommitSet of the latest set of commitments.
  2665  			err := c.log.LogContractResolutions(contractRes)
  2666  			if err != nil {
  2667  				log.Errorf("Unable to write resolutions: %v",
  2668  					err)
  2669  				return
  2670  			}
  2671  			err = c.log.InsertConfirmedCommitSet(
  2672  				&breachInfo.CommitSet,
  2673  			)
  2674  			if err != nil {
  2675  				log.Errorf("Unable to write commit set: %v",
  2676  					err)
  2677  				return
  2678  			}
  2679  
  2680  			// The channel is finally marked pending closed here as
  2681  			// the breacharbiter and channel arbitrator have
  2682  			// persisted the relevant states.
  2683  			closeSummary := &breachInfo.CloseSummary
  2684  			err = c.cfg.MarkChannelClosed(
  2685  				closeSummary,
  2686  				channeldb.ChanStatusRemoteCloseInitiator,
  2687  			)
  2688  			if err != nil {
  2689  				log.Errorf("Unable to mark channel closed: %v",
  2690  					err)
  2691  				return
  2692  			}
  2693  
  2694  			log.Infof("Breached channel=%v marked pending-closed",
  2695  				breachInfo.BreachResolution.FundingOutPoint)
  2696  
  2697  			// We'll advance our state machine until it reaches a
  2698  			// terminal state.
  2699  			_, _, err = c.advanceState(
  2700  				uint32(bestHeight), breachCloseTrigger,
  2701  				&breachInfo.CommitSet,
  2702  			)
  2703  			if err != nil {
  2704  				log.Errorf("Unable to advance state: %v", err)
  2705  			}
  2706  
  2707  		// A new contract has just been resolved, we'll now check our
  2708  		// log to see if all contracts have been resolved. If so, then
  2709  		// we can exit as the contract is fully resolved.
  2710  		case <-c.resolutionSignal:
  2711  			log.Infof("ChannelArbitrator(%v): a contract has been "+
  2712  				"fully resolved!", c.cfg.ChanPoint)
  2713  
  2714  			nextState, _, err := c.advanceState(
  2715  				uint32(bestHeight), chainTrigger, nil,
  2716  			)
  2717  			if err != nil {
  2718  				log.Errorf("Unable to advance state: %v", err)
  2719  			}
  2720  
  2721  			// If we don't have anything further to do after
  2722  			// advancing our state, then we'll exit.
  2723  			if nextState == StateFullyResolved {
  2724  				log.Infof("ChannelArbitrator(%v): all "+
  2725  					"contracts fully resolved, exiting",
  2726  					c.cfg.ChanPoint)
  2727  
  2728  				return
  2729  			}
  2730  
  2731  		// We've just received a request to forcibly close out the
  2732  		// channel. We'll
  2733  		case closeReq := <-c.forceCloseReqs:
  2734  			if c.state != StateDefault {
  2735  				select {
  2736  				case closeReq.closeTx <- nil:
  2737  				case <-c.quit:
  2738  				}
  2739  
  2740  				select {
  2741  				case closeReq.errResp <- errAlreadyForceClosed:
  2742  				case <-c.quit:
  2743  				}
  2744  
  2745  				continue
  2746  			}
  2747  
  2748  			nextState, closeTx, err := c.advanceState(
  2749  				uint32(bestHeight), userTrigger, nil,
  2750  			)
  2751  			if err != nil {
  2752  				log.Errorf("Unable to advance state: %v", err)
  2753  			}
  2754  
  2755  			select {
  2756  			case closeReq.closeTx <- closeTx:
  2757  			case <-c.quit:
  2758  				return
  2759  			}
  2760  
  2761  			select {
  2762  			case closeReq.errResp <- err:
  2763  			case <-c.quit:
  2764  				return
  2765  			}
  2766  
  2767  			// If we don't have anything further to do after
  2768  			// advancing our state, then we'll exit.
  2769  			if nextState == StateFullyResolved {
  2770  				log.Infof("ChannelArbitrator(%v): all "+
  2771  					"contracts resolved, exiting",
  2772  					c.cfg.ChanPoint)
  2773  				return
  2774  			}
  2775  
  2776  		case <-c.quit:
  2777  			return
  2778  		}
  2779  	}
  2780  }
  2781  
  2782  // checkLegacyBreach returns StateFullyResolved if the channel was closed with
  2783  // a breach transaction before the channel arbitrator launched its own breach
  2784  // resolver. StateContractClosed is returned if this is a modern breach close
  2785  // with a breach resolver. StateError is returned if the log lookup failed.
  2786  func (c *ChannelArbitrator) checkLegacyBreach() (ArbitratorState, error) {
  2787  	// A previous version of the channel arbitrator would make the breach
  2788  	// close skip to StateFullyResolved. If there are no contract
  2789  	// resolutions in the bolt arbitrator log, then this is an older breach
  2790  	// close. Otherwise, if there are resolutions, the state should advance
  2791  	// to StateContractClosed.
  2792  	_, err := c.log.FetchContractResolutions()
  2793  	if err == errNoResolutions {
  2794  		// This is an older breach close still in the database.
  2795  		return StateFullyResolved, nil
  2796  	} else if err != nil {
  2797  		return StateError, err
  2798  	}
  2799  
  2800  	// This is a modern breach close with resolvers.
  2801  	return StateContractClosed, nil
  2802  }