github.com/decred/dcrlnd@v0.7.6/contractcourt/channel_arbitrator_test.go (about)

     1  package contractcourt
     2  
     3  import (
     4  	"errors"
     5  	"fmt"
     6  	"io/ioutil"
     7  	"os"
     8  	"path/filepath"
     9  	"reflect"
    10  	"sort"
    11  	"sync"
    12  	"testing"
    13  	"time"
    14  
    15  	"github.com/decred/dcrd/chaincfg/chainhash"
    16  	"github.com/decred/dcrd/chaincfg/v3"
    17  	"github.com/decred/dcrd/dcrutil/v4"
    18  	"github.com/decred/dcrd/wire"
    19  	"github.com/decred/dcrlnd/chainntnfs"
    20  	"github.com/decred/dcrlnd/channeldb"
    21  	"github.com/decred/dcrlnd/clock"
    22  	"github.com/decred/dcrlnd/input"
    23  	"github.com/decred/dcrlnd/kvdb"
    24  	"github.com/decred/dcrlnd/lntest/mock"
    25  	"github.com/decred/dcrlnd/lntypes"
    26  	"github.com/decred/dcrlnd/lnwallet"
    27  	"github.com/decred/dcrlnd/lnwire"
    28  	"github.com/stretchr/testify/require"
    29  )
    30  
const (
	// defaultTimeout is the timeout we allow when waiting for ordinary
	// test events such as channel sends, resolutions, and close
	// notifications.
	defaultTimeout = time.Second * 5

	// stateTimeout is the timeout we allow when waiting for state
	// transitions.
	stateTimeout = time.Second * 15
)
    38  
// mockArbitratorLog is an in-memory implementation of the ArbitratorLog
// interface used to drive the ChannelArbitrator state machine in tests. The
// fail* fields let individual tests inject errors at specific points in the
// arbitrator's operation, and newStates lets tests observe every committed
// state transition.
type mockArbitratorLog struct {
	state           ArbitratorState
	newStates       chan ArbitratorState
	failLog         bool
	failFetch       error
	failCommit      bool
	failCommitState ArbitratorState
	resolutions     *ContractResolutions
	resolvers       map[ContractResolver]struct{}

	commitSet *CommitSet

	// The embedded Mutex guards the resolvers map, which may be accessed
	// from both the arbitrator's goroutine and the test goroutine.
	sync.Mutex
}

// A compile time check to ensure mockArbitratorLog meets the ArbitratorLog
// interface.
var _ ArbitratorLog = (*mockArbitratorLog)(nil)
    57  
// CurrentState returns the last state committed to the mock log. The kvdb.RTx
// argument is ignored; the mock keeps its state in memory.
func (b *mockArbitratorLog) CurrentState(kvdb.RTx) (ArbitratorState, error) {
	return b.state, nil
}
    61  
    62  func (b *mockArbitratorLog) CommitState(s ArbitratorState) error {
    63  	if b.failCommit && s == b.failCommitState {
    64  		return fmt.Errorf("intentional commit error at state %v",
    65  			b.failCommitState)
    66  	}
    67  	b.state = s
    68  	b.newStates <- s
    69  	return nil
    70  }
    71  
    72  func (b *mockArbitratorLog) FetchUnresolvedContracts() ([]ContractResolver,
    73  	error) {
    74  
    75  	b.Lock()
    76  	v := make([]ContractResolver, len(b.resolvers))
    77  	idx := 0
    78  	for resolver := range b.resolvers {
    79  		v[idx] = resolver
    80  		idx++
    81  	}
    82  	b.Unlock()
    83  
    84  	return v, nil
    85  }
    86  
    87  func (b *mockArbitratorLog) InsertUnresolvedContracts(_ []*channeldb.ResolverReport,
    88  	resolvers ...ContractResolver) error {
    89  
    90  	b.Lock()
    91  	for _, resolver := range resolvers {
    92  		resKey := resolver.ResolverKey()
    93  		if resKey == nil {
    94  			continue
    95  		}
    96  
    97  		b.resolvers[resolver] = struct{}{}
    98  	}
    99  	b.Unlock()
   100  	return nil
   101  }
   102  
   103  func (b *mockArbitratorLog) SwapContract(oldContract,
   104  	newContract ContractResolver) error {
   105  
   106  	b.Lock()
   107  	delete(b.resolvers, oldContract)
   108  	b.resolvers[newContract] = struct{}{}
   109  	b.Unlock()
   110  
   111  	return nil
   112  }
   113  
   114  func (b *mockArbitratorLog) ResolveContract(res ContractResolver) error {
   115  	b.Lock()
   116  	delete(b.resolvers, res)
   117  	b.Unlock()
   118  
   119  	return nil
   120  }
   121  
   122  func (b *mockArbitratorLog) LogContractResolutions(c *ContractResolutions) error {
   123  	if b.failLog {
   124  		return fmt.Errorf("intentional log failure")
   125  	}
   126  	b.resolutions = c
   127  	return nil
   128  }
   129  
   130  func (b *mockArbitratorLog) FetchContractResolutions() (*ContractResolutions, error) {
   131  	if b.failFetch != nil {
   132  		return nil, b.failFetch
   133  	}
   134  
   135  	return b.resolutions, nil
   136  }
   137  
// FetchChainActions is a no-op for the mock log; chain actions are never
// recorded, so a nil map is always returned.
func (b *mockArbitratorLog) FetchChainActions() (ChainActionMap, error) {
	return nil, nil
}
   141  
// InsertConfirmedCommitSet stores the confirmed commit set in memory so it
// can be retrieved later via FetchConfirmedCommitSet.
func (b *mockArbitratorLog) InsertConfirmedCommitSet(c *CommitSet) error {
	b.commitSet = c
	return nil
}
   146  
// FetchConfirmedCommitSet returns the commit set previously stored via
// InsertConfirmedCommitSet, or nil if none was stored. The kvdb.RTx argument
// is ignored.
func (b *mockArbitratorLog) FetchConfirmedCommitSet(kvdb.RTx) (*CommitSet, error) {
	return b.commitSet, nil
}
   150  
// WipeHistory is a no-op for the mock log; nothing is persisted, so there is
// nothing to wipe.
func (b *mockArbitratorLog) WipeHistory() error {
	return nil
}
   154  
// testArbLog is a wrapper around an existing (ideally fully concrete
// ArbitratorLog) that lets us intercept certain calls like transitioning to a
// new state.
type testArbLog struct {
	ArbitratorLog

	// newStates receives every state successfully committed through the
	// wrapped log, letting tests observe transitions.
	newStates chan ArbitratorState
}
   163  
   164  func (t *testArbLog) CommitState(s ArbitratorState) error {
   165  	if err := t.ArbitratorLog.CommitState(s); err != nil {
   166  		return err
   167  	}
   168  
   169  	t.newStates <- s
   170  
   171  	return nil
   172  }
   173  
// mockChainIO is a no-op implementation of lnwallet.BlockChainIO for tests;
// every method simply returns zero values.
type mockChainIO struct{}

var _ lnwallet.BlockChainIO = (*mockChainIO)(nil)

// GetBestBlock returns a nil hash and height 0.
func (*mockChainIO) GetBestBlock() (*chainhash.Hash, int32, error) {
	return nil, 0, nil
}

// GetUtxo always reports a nil output.
func (*mockChainIO) GetUtxo(op *wire.OutPoint, _ []byte,
	heightHint uint32, _ <-chan struct{}) (*wire.TxOut, error) {
	return nil, nil
}

// GetBlockHash always returns a nil hash.
func (*mockChainIO) GetBlockHash(blockHeight int64) (*chainhash.Hash, error) {
	return nil, nil
}

// GetBlock always returns a nil block.
func (*mockChainIO) GetBlock(blockHash *chainhash.Hash) (*wire.MsgBlock, error) {
	return nil, nil
}
   194  
// chanArbTestCtx bundles a ChannelArbitrator under test together with the
// channels and mocks needed to drive it and observe its behavior.
type chanArbTestCtx struct {
	t *testing.T

	// chanArb is the arbitrator under test.
	chanArb *ChannelArbitrator

	// cleanUp optionally tears down resources (e.g. a temp DB) created
	// for this context. May be nil.
	cleanUp func()

	// resolvedChan is signaled when MarkChannelResolved is called.
	resolvedChan chan struct{}

	// incubationRequests is signaled for each IncubateOutputs call.
	incubationRequests chan struct{}

	// resolutions receives the messages passed to DeliverResolutionMsg.
	resolutions chan []ResolutionMsg

	// log is the arbitrator log backing the arbitrator under test.
	log ArbitratorLog

	// sweeper is the mock sweeper wired into the arbitrator's config.
	sweeper *mockSweeper

	// breachSubscribed is signaled when SubscribeBreachComplete is
	// called; breachResolutionChan is the channel handed to that call.
	breachSubscribed     chan struct{}
	breachResolutionChan chan struct{}
}
   215  
   216  func (c *chanArbTestCtx) CleanUp() {
   217  	if err := c.chanArb.Stop(); err != nil {
   218  		c.t.Fatalf("unable to stop chan arb: %v", err)
   219  	}
   220  
   221  	if c.cleanUp != nil {
   222  		c.cleanUp()
   223  	}
   224  }
   225  
   226  // AssertStateTransitions asserts that the state machine steps through the
   227  // passed states in order.
   228  func (c *chanArbTestCtx) AssertStateTransitions(expectedStates ...ArbitratorState) {
   229  	c.t.Helper()
   230  
   231  	var newStatesChan chan ArbitratorState
   232  	switch log := c.log.(type) {
   233  	case *mockArbitratorLog:
   234  		newStatesChan = log.newStates
   235  
   236  	case *testArbLog:
   237  		newStatesChan = log.newStates
   238  
   239  	default:
   240  		c.t.Fatalf("unable to assert state transitions with %T", log)
   241  	}
   242  
   243  	for _, exp := range expectedStates {
   244  		var state ArbitratorState
   245  		select {
   246  		case state = <-newStatesChan:
   247  		case <-time.After(defaultTimeout):
   248  			c.t.Fatalf("new state not received")
   249  		}
   250  
   251  		if state != exp {
   252  			c.t.Fatalf("expected new state %v, got %v", exp, state)
   253  		}
   254  	}
   255  }
   256  
   257  // AssertState checks that the ChannelArbitrator is in the state we expect it
   258  // to be.
   259  func (c *chanArbTestCtx) AssertState(expected ArbitratorState) {
   260  	if c.chanArb.state != expected {
   261  		c.t.Fatalf("expected state %v, was %v", expected, c.chanArb.state)
   262  	}
   263  }
   264  
   265  // Restart simulates a clean restart of the channel arbitrator, forcing it to
   266  // walk through it's recovery logic. If this function returns nil, then a
   267  // restart was successful. Note that the restart process keeps the log in
   268  // place, in order to simulate proper persistence of the log. The caller can
   269  // optionally provide a restart closure which will be executed before the
   270  // resolver is started again, but after it is created.
   271  func (c *chanArbTestCtx) Restart(restartClosure func(*chanArbTestCtx)) (*chanArbTestCtx, error) {
   272  	if err := c.chanArb.Stop(); err != nil {
   273  		return nil, err
   274  	}
   275  
   276  	newCtx, err := createTestChannelArbitrator(c.t, c.log)
   277  	if err != nil {
   278  		return nil, err
   279  	}
   280  
   281  	if restartClosure != nil {
   282  		restartClosure(newCtx)
   283  	}
   284  
   285  	if err := newCtx.chanArb.Start(nil); err != nil {
   286  		return nil, err
   287  	}
   288  
   289  	return newCtx, nil
   290  }
   291  
// testChanArbOption applies custom settings to a channel arbitrator config for
// testing purposes.
type testChanArbOption func(cfg *ChannelArbitratorConfig)

// withMarkClosed returns a test option that overrides the MarkChannelClosed
// function in the Channel Arbitrator's config.
func withMarkClosed(markClosed func(*channeldb.ChannelCloseSummary,
	...channeldb.ChannelStatus) error) testChanArbOption {

	return func(cfg *ChannelArbitratorConfig) {
		cfg.MarkChannelClosed = markClosed
	}
}
   305  
// createTestChannelArbitrator returns a channel arbitrator test context which
// contains a channel arbitrator with default values. These values can be
// changed by providing options which overwrite the default config. If log is
// nil, a real bolt-backed ArbitratorLog (wrapped in a testArbLog) is created
// in a temporary directory and torn down via the context's cleanUp closure.
func createTestChannelArbitrator(t *testing.T, log ArbitratorLog,
	opts ...testChanArbOption) (*chanArbTestCtx, error) {

	chanArbCtx := &chanArbTestCtx{
		breachSubscribed: make(chan struct{}),
	}

	// Channel identity and the chain-event subscription the arbitrator
	// will listen on. All event channels are buffered so tests can push
	// an event without blocking.
	chanPoint := wire.OutPoint{}
	shortChanID := lnwire.ShortChannelID{}
	chanEvents := &ChainEventSubscription{
		RemoteUnilateralClosure: make(chan *RemoteUnilateralCloseInfo, 1),
		LocalUnilateralClosure:  make(chan *LocalUnilateralCloseInfo, 1),
		CooperativeClosure:      make(chan *CooperativeCloseInfo, 1),
		ContractBreach:          make(chan *BreachCloseInfo, 1),
	}

	resolutionChan := make(chan []ResolutionMsg, 1)
	incubateChan := make(chan struct{})

	// The chain-level config stubs out all external interactions and
	// routes observable calls onto the test context's channels.
	chainIO := &mockChainIO{}
	mockSweeper := newMockSweeper()
	chainArbCfg := ChainArbitratorConfig{
		NetParams: chaincfg.RegNetParams(),
		ChainIO:   chainIO,
		PublishTx: func(*wire.MsgTx, string) error {
			return nil
		},
		DeliverResolutionMsg: func(msgs ...ResolutionMsg) error {
			resolutionChan <- msgs
			return nil
		},
		OutgoingBroadcastDelta: 5,
		IncomingBroadcastDelta: 5,
		Notifier: &mock.ChainNotifier{
			EpochChan: make(chan *chainntnfs.BlockEpoch),
			SpendChan: make(chan *chainntnfs.SpendDetail),
			ConfChan:  make(chan *chainntnfs.TxConfirmation),
		},
		IncubateOutputs: func(wire.OutPoint,
			*lnwallet.OutgoingHtlcResolution,
			*lnwallet.IncomingHtlcResolution, uint32) error {

			incubateChan <- struct{}{}
			return nil
		},
		OnionProcessor: &mockOnionProcessor{},
		IsForwardedHTLC: func(chanID lnwire.ShortChannelID,
			htlcIndex uint64) bool {

			return true
		},
		SubscribeBreachComplete: func(op *wire.OutPoint,
			c chan struct{}) (bool, error) {

			chanArbCtx.breachResolutionChan = c
			chanArbCtx.breachSubscribed <- struct{}{}
			return false, nil
		},
		Clock:   clock.NewDefaultClock(),
		Sweeper: mockSweeper,
	}

	// We'll use the resolvedChan to synchronize on call to
	// MarkChannelResolved.
	resolvedChan := make(chan struct{}, 1)

	// Next we'll create the matching configuration struct that contains
	// all interfaces and methods the arbitrator needs to do its job.
	arbCfg := &ChannelArbitratorConfig{
		ChanPoint:   chanPoint,
		ShortChanID: shortChanID,
		MarkChannelResolved: func() error {
			resolvedChan <- struct{}{}
			return nil
		},
		Channel: &mockChannel{},
		MarkCommitmentBroadcasted: func(_ *wire.MsgTx, _ bool) error {
			return nil
		},
		MarkChannelClosed: func(*channeldb.ChannelCloseSummary,
			...channeldb.ChannelStatus) error {
			return nil
		},
		IsPendingClose:        false,
		ChainArbitratorConfig: chainArbCfg,
		ChainEvents:           chanEvents,
		PutResolverReport: func(_ kvdb.RwTx,
			_ *channeldb.ResolverReport) error {

			return nil
		},
		FetchHistoricalChannel: func() (*channeldb.OpenChannel, error) {
			return &channeldb.OpenChannel{}, nil
		},
	}

	// Apply all custom options to the config struct.
	for _, option := range opts {
		option(arbCfg)
	}

	// When no log is supplied, back the arbitrator with a real bolt DB
	// in a temp directory so restart/recovery paths can be exercised.
	var cleanUp func()
	if log == nil {
		dbDir, err := ioutil.TempDir("", "chanArb")
		if err != nil {
			return nil, err
		}
		dbPath := filepath.Join(dbDir, "testdb")
		db, err := kvdb.Create(
			kvdb.BoltBackendName, dbPath, true,
			kvdb.DefaultDBTimeout,
		)
		if err != nil {
			return nil, err
		}

		backingLog, err := newBoltArbitratorLog(
			db, *arbCfg, chainhash.Hash{}, chanPoint,
		)
		if err != nil {
			return nil, err
		}
		cleanUp = func() {
			db.Close()
			os.RemoveAll(dbDir)
		}

		// Wrap the concrete log so tests can still observe state
		// transitions via the newStates channel.
		log = &testArbLog{
			ArbitratorLog: backingLog,
			newStates:     make(chan ArbitratorState),
		}
	}

	htlcSets := make(map[HtlcSetKey]htlcSet)

	chanArb := NewChannelArbitrator(*arbCfg, htlcSets, log)

	// Populate the remaining fields of the test context.
	chanArbCtx.t = t
	chanArbCtx.chanArb = chanArb
	chanArbCtx.cleanUp = cleanUp
	chanArbCtx.resolvedChan = resolvedChan
	chanArbCtx.resolutions = resolutionChan
	chanArbCtx.log = log
	chanArbCtx.incubationRequests = incubateChan
	chanArbCtx.sweeper = mockSweeper

	return chanArbCtx, nil
}
   457  
// TestChannelArbitratorCooperativeClose tests that the ChannelArbitrator
// correctly marks the channel resolved in case a cooperative close is
// confirmed.
func TestChannelArbitratorCooperativeClose(t *testing.T) {
	log := &mockArbitratorLog{
		state:     StateDefault,
		newStates: make(chan ArbitratorState, 5),
	}

	chanArbCtx, err := createTestChannelArbitrator(t, log)
	if err != nil {
		t.Fatalf("unable to create ChannelArbitrator: %v", err)
	}

	if err := chanArbCtx.chanArb.Start(nil); err != nil {
		t.Fatalf("unable to start ChannelArbitrator: %v", err)
	}
	defer func() {
		if err := chanArbCtx.chanArb.Stop(); err != nil {
			t.Fatalf("unable to stop chan arb: %v", err)
		}
	}()

	// It should start out in the default state.
	chanArbCtx.AssertState(StateDefault)

	// We set up a channel to detect when MarkChannelClosed is called.
	closeInfos := make(chan *channeldb.ChannelCloseSummary)
	chanArbCtx.chanArb.cfg.MarkChannelClosed = func(
		closeInfo *channeldb.ChannelCloseSummary,
		statuses ...channeldb.ChannelStatus) error {

		closeInfos <- closeInfo
		return nil
	}

	// Cooperative close should trigger a MarkChannelClosed +
	// MarkChannelResolved.
	closeInfo := &CooperativeCloseInfo{
		&channeldb.ChannelCloseSummary{},
	}
	chanArbCtx.chanArb.cfg.ChainEvents.CooperativeClosure <- closeInfo

	// The close summary delivered to MarkChannelClosed must carry the
	// cooperative close type.
	select {
	case c := <-closeInfos:
		if c.CloseType != channeldb.CooperativeClose {
			t.Fatalf("expected cooperative close, got %v", c.CloseType)
		}
	case <-time.After(defaultTimeout):
		t.Fatalf("timeout waiting for channel close")
	}

	// It should mark the channel as resolved.
	select {
	case <-chanArbCtx.resolvedChan:
		// Expected.
	case <-time.After(defaultTimeout):
		t.Fatalf("contract was not resolved")
	}
}
   518  
// TestChannelArbitratorRemoteForceClose checks that the ChannelArbitrator goes
// through the expected states if a remote force close is observed in the
// chain.
func TestChannelArbitratorRemoteForceClose(t *testing.T) {
	log := &mockArbitratorLog{
		state:     StateDefault,
		newStates: make(chan ArbitratorState, 5),
	}

	chanArbCtx, err := createTestChannelArbitrator(t, log)
	if err != nil {
		t.Fatalf("unable to create ChannelArbitrator: %v", err)
	}
	chanArb := chanArbCtx.chanArb

	if err := chanArb.Start(nil); err != nil {
		t.Fatalf("unable to start ChannelArbitrator: %v", err)
	}
	defer chanArb.Stop()

	// It should start out in the default state.
	chanArbCtx.AssertState(StateDefault)

	// Send a remote force close event. There are no HTLCs on the remote
	// commitment, so the channel should be resolvable immediately.
	commitSpend := &chainntnfs.SpendDetail{
		SpenderTxHash: &chainhash.Hash{},
	}

	uniClose := &lnwallet.UnilateralCloseSummary{
		SpendDetail:     commitSpend,
		HtlcResolutions: &lnwallet.HtlcResolutions{},
	}
	chanArb.cfg.ChainEvents.RemoteUnilateralClosure <- &RemoteUnilateralCloseInfo{
		UnilateralCloseSummary: uniClose,
		CommitSet: CommitSet{
			ConfCommitKey: &RemoteHtlcSet,
			HtlcSets:      make(map[HtlcSetKey][]channeldb.HTLC),
		},
	}

	// It should transition StateDefault -> StateContractClosed ->
	// StateFullyResolved.
	chanArbCtx.AssertStateTransitions(
		StateContractClosed, StateFullyResolved,
	)

	// It should also mark the channel as resolved.
	select {
	case <-chanArbCtx.resolvedChan:
		// Expected.
	case <-time.After(defaultTimeout):
		t.Fatalf("contract was not resolved")
	}
}
   573  
// TestChannelArbitratorLocalForceClose tests that the ChannelArbitrator goes
// through the expected states in case we request it to force close the channel,
// and the local force close event is observed in chain.
func TestChannelArbitratorLocalForceClose(t *testing.T) {
	log := &mockArbitratorLog{
		state:     StateDefault,
		newStates: make(chan ArbitratorState, 5),
	}

	chanArbCtx, err := createTestChannelArbitrator(t, log)
	if err != nil {
		t.Fatalf("unable to create ChannelArbitrator: %v", err)
	}
	chanArb := chanArbCtx.chanArb

	if err := chanArb.Start(nil); err != nil {
		t.Fatalf("unable to start ChannelArbitrator: %v", err)
	}
	defer chanArb.Stop()

	// It should start out in the default state.
	chanArbCtx.AssertState(StateDefault)

	// We create a channel we can use to pause the ChannelArbitrator at the
	// point where it broadcasts the close tx, and check its state.
	stateChan := make(chan ArbitratorState)
	chanArb.cfg.PublishTx = func(*wire.MsgTx, string) error {
		// When the force close tx is being broadcasted, check that the
		// state is correct at that point.
		select {
		case stateChan <- chanArb.state:
		case <-chanArb.quit:
			return fmt.Errorf("exiting")
		}
		return nil
	}

	errChan := make(chan error, 1)
	respChan := make(chan *wire.MsgTx, 1)

	// With the channel found, and the request crafted, we'll send over a
	// force close request to the arbitrator that watches this channel.
	chanArb.forceCloseReqs <- &forceCloseReq{
		errResp: errChan,
		closeTx: respChan,
	}

	// It should transition to StateBroadcastCommit.
	chanArbCtx.AssertStateTransitions(StateBroadcastCommit)

	// When it is broadcasting the force close, its state should be
	// StateBroadcastCommit.
	select {
	case state := <-stateChan:
		if state != StateBroadcastCommit {
			t.Fatalf("state during PublishTx was %v", state)
		}
	case <-time.After(stateTimeout):
		t.Fatalf("did not get state update")
	}

	// After broadcasting, transition should be to
	// StateCommitmentBroadcasted.
	chanArbCtx.AssertStateTransitions(StateCommitmentBroadcasted)

	// The force close request should hand back the close transaction.
	select {
	case <-respChan:
	case <-time.After(defaultTimeout):
		t.Fatalf("no response received")
	}

	// The force close request should complete without error.
	select {
	case err := <-errChan:
		if err != nil {
			t.Fatalf("error force closing channel: %v", err)
		}
	case <-time.After(defaultTimeout):
		t.Fatalf("no response received")
	}

	// After broadcasting the close tx, it should be in state
	// StateCommitmentBroadcasted.
	chanArbCtx.AssertState(StateCommitmentBroadcasted)

	// Now notify about the local force close getting confirmed.
	chanArb.cfg.ChainEvents.LocalUnilateralClosure <- &LocalUnilateralCloseInfo{
		SpendDetail: &chainntnfs.SpendDetail{},
		LocalForceCloseSummary: &lnwallet.LocalForceCloseSummary{
			CloseTx:         &wire.MsgTx{},
			HtlcResolutions: &lnwallet.HtlcResolutions{},
		},
		ChannelCloseSummary: &channeldb.ChannelCloseSummary{},
	}

	// It should transition StateContractClosed -> StateFullyResolved.
	chanArbCtx.AssertStateTransitions(StateContractClosed, StateFullyResolved)

	// It should also mark the channel as resolved.
	select {
	case <-chanArbCtx.resolvedChan:
		// Expected.
	case <-time.After(defaultTimeout):
		t.Fatalf("contract was not resolved")
	}
}
   679  
   680  // TestChannelArbitratorBreachClose tests that the ChannelArbitrator goes
   681  // through the expected states in case we notice a breach in the chain, and
   682  // is able to properly progress the breachResolver and anchorResolver to a
   683  // successful resolution.
   684  func TestChannelArbitratorBreachClose(t *testing.T) {
   685  	log := &mockArbitratorLog{
   686  		state:     StateDefault,
   687  		newStates: make(chan ArbitratorState, 5),
   688  		resolvers: make(map[ContractResolver]struct{}),
   689  	}
   690  
   691  	chanArbCtx, err := createTestChannelArbitrator(t, log)
   692  	if err != nil {
   693  		t.Fatalf("unable to create ChannelArbitrator: %v", err)
   694  	}
   695  	chanArb := chanArbCtx.chanArb
   696  	chanArb.cfg.PreimageDB = newMockWitnessBeacon()
   697  	chanArb.cfg.Registry = &mockRegistry{}
   698  
   699  	if err := chanArb.Start(nil); err != nil {
   700  		t.Fatalf("unable to start ChannelArbitrator: %v", err)
   701  	}
   702  	defer func() {
   703  		if err := chanArb.Stop(); err != nil {
   704  			t.Fatal(err)
   705  		}
   706  	}()
   707  
   708  	// It should start out in the default state.
   709  	chanArbCtx.AssertState(StateDefault)
   710  
   711  	// We create two HTLCs, one incoming and one outgoing. We will later
   712  	// assert that we only receive a ResolutionMsg for the outgoing HTLC.
   713  	outgoingIdx := uint64(2)
   714  
   715  	rHash1 := [lntypes.PreimageSize]byte{1, 2, 3}
   716  	htlc1 := channeldb.HTLC{
   717  		RHash:       rHash1,
   718  		OutputIndex: 2,
   719  		Incoming:    false,
   720  		HtlcIndex:   outgoingIdx,
   721  		LogIndex:    2,
   722  	}
   723  
   724  	rHash2 := [lntypes.PreimageSize]byte{2, 2, 2}
   725  	htlc2 := channeldb.HTLC{
   726  		RHash:       rHash2,
   727  		OutputIndex: 3,
   728  		Incoming:    true,
   729  		HtlcIndex:   3,
   730  		LogIndex:    3,
   731  	}
   732  
   733  	anchorRes := &lnwallet.AnchorResolution{
   734  		AnchorSignDescriptor: input.SignDescriptor{
   735  			Output: &wire.TxOut{Value: 1},
   736  		},
   737  	}
   738  
   739  	// Create the BreachCloseInfo that the chain_watcher would normally
   740  	// send to the channel_arbitrator.
   741  	breachInfo := &BreachCloseInfo{
   742  		BreachResolution: &BreachResolution{
   743  			FundingOutPoint: wire.OutPoint{},
   744  		},
   745  		AnchorResolution: anchorRes,
   746  		CommitSet: CommitSet{
   747  			ConfCommitKey: &RemoteHtlcSet,
   748  			HtlcSets: map[HtlcSetKey][]channeldb.HTLC{
   749  				RemoteHtlcSet: {htlc1, htlc2},
   750  			},
   751  		},
   752  		CommitHash: chainhash.Hash{},
   753  	}
   754  
   755  	// Send a breach close event.
   756  	chanArb.cfg.ChainEvents.ContractBreach <- breachInfo
   757  
   758  	// It should transition StateDefault -> StateContractClosed.
   759  	chanArbCtx.AssertStateTransitions(StateContractClosed)
   760  
   761  	// We should receive one ResolutionMsg as there was only one outgoing
   762  	// HTLC at the time of the breach.
   763  	select {
   764  	case res := <-chanArbCtx.resolutions:
   765  		require.Equal(t, 1, len(res))
   766  		require.Equal(t, outgoingIdx, res[0].HtlcIndex)
   767  	case <-time.After(5 * time.Second):
   768  		t.Fatal("expected to receive a resolution msg")
   769  	}
   770  
   771  	// We should now transition from StateContractClosed to
   772  	// StateWaitingFullResolution.
   773  	chanArbCtx.AssertStateTransitions(StateWaitingFullResolution)
   774  
   775  	// One of the resolvers should be an anchor resolver and the other
   776  	// should be a breach resolver.
   777  	require.Equal(t, 2, len(chanArb.activeResolvers))
   778  
   779  	var anchorExists, breachExists bool
   780  	for _, resolver := range chanArb.activeResolvers {
   781  		switch resolver.(type) {
   782  		case *anchorResolver:
   783  			anchorExists = true
   784  		case *breachResolver:
   785  			breachExists = true
   786  		default:
   787  			t.Fatalf("did not expect resolver %T", resolver)
   788  		}
   789  	}
   790  	require.True(t, anchorExists && breachExists)
   791  
   792  	// The anchor resolver is expected to re-offer the anchor input to the
   793  	// sweeper.
   794  	<-chanArbCtx.sweeper.sweptInputs
   795  
   796  	// Wait for SubscribeBreachComplete to be called.
   797  	<-chanArbCtx.breachSubscribed
   798  
   799  	// We'll now close the breach channel so that the state transitions to
   800  	// StateFullyResolved.
   801  	close(chanArbCtx.breachResolutionChan)
   802  
   803  	chanArbCtx.AssertStateTransitions(StateFullyResolved)
   804  
   805  	// It should also mark the channel as resolved.
   806  	select {
   807  	case <-chanArbCtx.resolvedChan:
   808  		// Expected.
   809  	case <-time.After(defaultTimeout):
   810  		t.Fatalf("contract was not resolved")
   811  	}
   812  }
   813  
   814  // TestChannelArbitratorLocalForceClosePendingHtlc tests that the
   815  // ChannelArbitrator goes through the expected states in case we request it to
   816  // force close a channel that still has an HTLC pending.
   817  func TestChannelArbitratorLocalForceClosePendingHtlc(t *testing.T) {
   818  	// We create a new test context for this channel arb, notice that we
   819  	// pass in a nil ArbitratorLog which means that a default one backed by
   820  	// a real DB will be created. We need this for our test as we want to
   821  	// test proper restart recovery and resolver population.
   822  	chanArbCtx, err := createTestChannelArbitrator(t, nil)
   823  	if err != nil {
   824  		t.Fatalf("unable to create ChannelArbitrator: %v", err)
   825  	}
   826  	chanArb := chanArbCtx.chanArb
   827  	chanArb.cfg.PreimageDB = newMockWitnessBeacon()
   828  	chanArb.cfg.Registry = &mockRegistry{}
   829  
   830  	if err := chanArb.Start(nil); err != nil {
   831  		t.Fatalf("unable to start ChannelArbitrator: %v", err)
   832  	}
   833  	defer chanArb.Stop()
   834  
   835  	// Create htlcUpdates channel.
   836  	htlcUpdates := make(chan *ContractUpdate)
   837  
   838  	signals := &ContractSignals{
   839  		HtlcUpdates: htlcUpdates,
   840  		ShortChanID: lnwire.ShortChannelID{},
   841  	}
   842  	chanArb.UpdateContractSignals(signals)
   843  
   844  	// Add HTLC to channel arbitrator.
   845  	htlcAmt := 10000
   846  	htlc := channeldb.HTLC{
   847  		Incoming:  false,
   848  		Amt:       lnwire.MilliAtom(htlcAmt),
   849  		HtlcIndex: 99,
   850  	}
   851  
   852  	outgoingDustHtlc := channeldb.HTLC{
   853  		Incoming:    false,
   854  		Amt:         100,
   855  		HtlcIndex:   100,
   856  		OutputIndex: -1,
   857  	}
   858  
   859  	incomingDustHtlc := channeldb.HTLC{
   860  		Incoming:    true,
   861  		Amt:         105,
   862  		HtlcIndex:   101,
   863  		OutputIndex: -1,
   864  	}
   865  
   866  	htlcSet := []channeldb.HTLC{
   867  		htlc, outgoingDustHtlc, incomingDustHtlc,
   868  	}
   869  
   870  	htlcUpdates <- &ContractUpdate{
   871  		HtlcKey: LocalHtlcSet,
   872  		Htlcs:   htlcSet,
   873  	}
   874  
   875  	errChan := make(chan error, 1)
   876  	respChan := make(chan *wire.MsgTx, 1)
   877  
   878  	// With the channel found, and the request crafted, we'll send over a
   879  	// force close request to the arbitrator that watches this channel.
   880  	chanArb.forceCloseReqs <- &forceCloseReq{
   881  		errResp: errChan,
   882  		closeTx: respChan,
   883  	}
   884  
   885  	// The force close request should trigger broadcast of the commitment
   886  	// transaction.
   887  	chanArbCtx.AssertStateTransitions(
   888  		StateBroadcastCommit,
   889  		StateCommitmentBroadcasted,
   890  	)
   891  	select {
   892  	case <-respChan:
   893  	case <-time.After(defaultTimeout):
   894  		t.Fatalf("no response received")
   895  	}
   896  
   897  	select {
   898  	case err := <-errChan:
   899  		if err != nil {
   900  			t.Fatalf("error force closing channel: %v", err)
   901  		}
   902  	case <-time.After(defaultTimeout):
   903  		t.Fatalf("no response received")
   904  	}
   905  
   906  	// Now notify about the local force close getting confirmed.
   907  	closeTx := &wire.MsgTx{
   908  		TxIn: []*wire.TxIn{
   909  			{
   910  				PreviousOutPoint: wire.OutPoint{},
   911  				SignatureScript: []byte{
   912  					0x01, 0x01,
   913  					0x01, 0x02,
   914  				},
   915  			},
   916  		},
   917  	}
   918  
   919  	htlcOp := wire.OutPoint{
   920  		Hash:  closeTx.TxHash(),
   921  		Index: 0,
   922  	}
   923  
   924  	// Set up the outgoing resolution. Populate SignedTimeoutTx because our
   925  	// commitment transaction got confirmed.
   926  	outgoingRes := lnwallet.OutgoingHtlcResolution{
   927  		Expiry: 10,
   928  		SweepSignDesc: input.SignDescriptor{
   929  			Output: &wire.TxOut{},
   930  		},
   931  		SignedTimeoutTx: &wire.MsgTx{
   932  			TxIn: []*wire.TxIn{
   933  				{
   934  					PreviousOutPoint: htlcOp,
   935  					SignatureScript:  []byte{0x01, 0xff},
   936  				},
   937  			},
   938  			TxOut: []*wire.TxOut{
   939  				{},
   940  			},
   941  		},
   942  	}
   943  
   944  	chanArb.cfg.ChainEvents.LocalUnilateralClosure <- &LocalUnilateralCloseInfo{
   945  		SpendDetail: &chainntnfs.SpendDetail{},
   946  		LocalForceCloseSummary: &lnwallet.LocalForceCloseSummary{
   947  			CloseTx: closeTx,
   948  			HtlcResolutions: &lnwallet.HtlcResolutions{
   949  				OutgoingHTLCs: []lnwallet.OutgoingHtlcResolution{
   950  					outgoingRes,
   951  				},
   952  			},
   953  		},
   954  		ChannelCloseSummary: &channeldb.ChannelCloseSummary{},
   955  		CommitSet: CommitSet{
   956  			ConfCommitKey: &LocalHtlcSet,
   957  			HtlcSets: map[HtlcSetKey][]channeldb.HTLC{
   958  				LocalHtlcSet: htlcSet,
   959  			},
   960  		},
   961  	}
   962  
   963  	chanArbCtx.AssertStateTransitions(
   964  		StateContractClosed,
   965  		StateWaitingFullResolution,
   966  	)
   967  
   968  	// We expect an immediate resolution message for the outgoing dust htlc.
   969  	// It is not resolvable on-chain.
   970  	select {
   971  	case msgs := <-chanArbCtx.resolutions:
   972  		if len(msgs) != 1 {
   973  			t.Fatalf("expected 1 message, instead got %v", len(msgs))
   974  		}
   975  
   976  		if msgs[0].HtlcIndex != outgoingDustHtlc.HtlcIndex {
   977  			t.Fatalf("wrong htlc index: expected %v, got %v",
   978  				outgoingDustHtlc.HtlcIndex, msgs[0].HtlcIndex)
   979  		}
   980  	case <-time.After(defaultTimeout):
   981  		t.Fatalf("resolution msgs not sent")
   982  	}
   983  
   984  	// We'll grab the old notifier here as our resolvers are still holding
   985  	// a reference to this instance, and a new one will be created when we
   986  	// restart the channel arb below.
   987  	oldNotifier := chanArb.cfg.Notifier.(*mock.ChainNotifier)
   988  
   989  	// At this point, in order to simulate a restart, we'll re-create the
   990  	// channel arbitrator. We do this to ensure that all information
   991  	// required to properly resolve this HTLC are populated.
   992  	if err := chanArb.Stop(); err != nil {
   993  		t.Fatalf("unable to stop chan arb: %v", err)
   994  	}
   995  
   996  	// We'll no re-create the resolver, notice that we use the existing
   997  	// arbLog so it carries over the same on-disk state.
   998  	chanArbCtxNew, err := chanArbCtx.Restart(nil)
   999  	if err != nil {
  1000  		t.Fatalf("unable to create ChannelArbitrator: %v", err)
  1001  	}
  1002  	chanArb = chanArbCtxNew.chanArb
  1003  	defer chanArbCtxNew.CleanUp()
  1004  
  1005  	// Post restart, it should be the case that our resolver was properly
  1006  	// supplemented, and we only have a single resolver in the final set.
  1007  	if len(chanArb.activeResolvers) != 1 {
  1008  		t.Fatalf("expected single resolver, instead got: %v",
  1009  			len(chanArb.activeResolvers))
  1010  	}
  1011  
  1012  	// We'll now examine the in-memory state of the active resolvers to
  1013  	// ensure t hey were populated properly.
  1014  	resolver := chanArb.activeResolvers[0]
  1015  	outgoingResolver, ok := resolver.(*htlcOutgoingContestResolver)
  1016  	if !ok {
  1017  		t.Fatalf("expected outgoing contest resolver, got %vT",
  1018  			resolver)
  1019  	}
  1020  
  1021  	// The resolver should have its htlc amt field populated as it.
  1022  	if int64(outgoingResolver.htlc.Amt) != int64(htlcAmt) {
  1023  		t.Fatalf("wrong htlc amount: expected %v, got %v,",
  1024  			htlcAmt, int64(outgoingResolver.htlc.Amt))
  1025  	}
  1026  
  1027  	// htlcOutgoingContestResolver is now active and waiting for the HTLC to
  1028  	// expire. It should not yet have passed it on for incubation.
  1029  	select {
  1030  	case <-chanArbCtx.incubationRequests:
  1031  		t.Fatalf("contract should not be incubated yet")
  1032  	default:
  1033  	}
  1034  
  1035  	// Send a notification that the expiry height has been reached.
  1036  	oldNotifier.EpochChan <- &chainntnfs.BlockEpoch{Height: 10}
  1037  
  1038  	// htlcOutgoingContestResolver is now transforming into a
  1039  	// htlcTimeoutResolver and should send the contract off for incubation.
  1040  	select {
  1041  	case <-chanArbCtx.incubationRequests:
  1042  	case <-time.After(defaultTimeout):
  1043  		t.Fatalf("no response received")
  1044  	}
  1045  
  1046  	// Notify resolver that the HTLC output of the commitment has been
  1047  	// spent.
  1048  	oldNotifier.SpendChan <- &chainntnfs.SpendDetail{SpendingTx: closeTx}
  1049  
  1050  	// Finally, we should also receive a resolution message instructing the
  1051  	// switch to cancel back the HTLC.
  1052  	select {
  1053  	case msgs := <-chanArbCtx.resolutions:
  1054  		if len(msgs) != 1 {
  1055  			t.Fatalf("expected 1 message, instead got %v", len(msgs))
  1056  		}
  1057  
  1058  		if msgs[0].HtlcIndex != htlc.HtlcIndex {
  1059  			t.Fatalf("wrong htlc index: expected %v, got %v",
  1060  				htlc.HtlcIndex, msgs[0].HtlcIndex)
  1061  		}
  1062  	case <-time.After(defaultTimeout):
  1063  		t.Fatalf("resolution msgs not sent")
  1064  	}
  1065  
  1066  	// As this is our own commitment transaction, the HTLC will go through
  1067  	// to the second level. Channel arbitrator should still not be marked
  1068  	// as resolved.
  1069  	select {
  1070  	case <-chanArbCtxNew.resolvedChan:
  1071  		t.Fatalf("channel resolved prematurely")
  1072  	default:
  1073  	}
  1074  
  1075  	// Notify resolver that the second level transaction is spent.
  1076  	oldNotifier.SpendChan <- &chainntnfs.SpendDetail{SpendingTx: closeTx}
  1077  
  1078  	// At this point channel should be marked as resolved.
  1079  	chanArbCtxNew.AssertStateTransitions(StateFullyResolved)
  1080  	select {
  1081  	case <-chanArbCtxNew.resolvedChan:
  1082  	case <-time.After(defaultTimeout):
  1083  		t.Fatalf("contract was not resolved")
  1084  	}
  1085  }
  1086  
// TestChannelArbitratorLocalForceCloseRemoteConfirmed tests that the
// ChannelArbitrator behaves as expected in the case where we request a local
// force close, but a remote commitment ends up being confirmed in chain.
func TestChannelArbitratorLocalForceCloseRemoteConfirmed(t *testing.T) {
	log := &mockArbitratorLog{
		state:     StateDefault,
		newStates: make(chan ArbitratorState, 5),
	}

	chanArbCtx, err := createTestChannelArbitrator(t, log)
	if err != nil {
		t.Fatalf("unable to create ChannelArbitrator: %v", err)
	}
	chanArb := chanArbCtx.chanArb

	if err := chanArb.Start(nil); err != nil {
		t.Fatalf("unable to start ChannelArbitrator: %v", err)
	}
	defer chanArb.Stop()

	// It should start out in the default state.
	chanArbCtx.AssertState(StateDefault)

	// Create a channel we can use to assert the state when it publishes
	// the close tx.
	stateChan := make(chan ArbitratorState)
	chanArb.cfg.PublishTx = func(*wire.MsgTx, string) error {
		// When the force close tx is being broadcasted, check that the
		// state is correct at that point.
		select {
		case stateChan <- chanArb.state:
		case <-chanArb.quit:
			return fmt.Errorf("exiting")
		}
		return nil
	}

	errChan := make(chan error, 1)
	respChan := make(chan *wire.MsgTx, 1)

	// With the channel found, and the request crafted, we'll send over a
	// force close request to the arbitrator that watches this channel.
	chanArb.forceCloseReqs <- &forceCloseReq{
		errResp: errChan,
		closeTx: respChan,
	}

	// It should transition to StateBroadcastCommit.
	chanArbCtx.AssertStateTransitions(StateBroadcastCommit)

	// We expect it to be in state StateBroadcastCommit when publishing
	// the force close.
	select {
	case state := <-stateChan:
		if state != StateBroadcastCommit {
			t.Fatalf("state during PublishTx was %v", state)
		}
	case <-time.After(stateTimeout):
		t.Fatalf("no state update received")
	}

	// After broadcasting, transition should be to
	// StateCommitmentBroadcasted.
	chanArbCtx.AssertStateTransitions(StateCommitmentBroadcasted)

	// Wait for a response to the force close.
	select {
	case <-respChan:
	case <-time.After(defaultTimeout):
		t.Fatalf("no response received")
	}

	select {
	case err := <-errChan:
		if err != nil {
			t.Fatalf("error force closing channel: %v", err)
		}
	case <-time.After(defaultTimeout):
		t.Fatalf("no response received")
	}

	// The state should be StateCommitmentBroadcasted.
	chanArbCtx.AssertState(StateCommitmentBroadcasted)

	// Now notify about the _REMOTE_ commitment getting confirmed. Even
	// though we asked for a local force close, the remote party beat us
	// to the chain.
	commitSpend := &chainntnfs.SpendDetail{
		SpenderTxHash: &chainhash.Hash{},
	}
	uniClose := &lnwallet.UnilateralCloseSummary{
		SpendDetail:     commitSpend,
		HtlcResolutions: &lnwallet.HtlcResolutions{},
	}
	chanArb.cfg.ChainEvents.RemoteUnilateralClosure <- &RemoteUnilateralCloseInfo{
		UnilateralCloseSummary: uniClose,
	}

	// It should transition StateContractClosed -> StateFullyResolved.
	chanArbCtx.AssertStateTransitions(StateContractClosed, StateFullyResolved)

	// It should resolve.
	select {
	case <-chanArbCtx.resolvedChan:
		// Expected.
	case <-time.After(stateTimeout):
		t.Fatalf("contract was not resolved")
	}
}
  1194  
// TestChannelArbitratorLocalForceDoubleSpend tests that the
// ChannelArbitrator behaves as expected in the case where we request a local
// force close, but we fail broadcasting our commitment because a remote
// commitment has already been published.
func TestChannelArbitratorLocalForceDoubleSpend(t *testing.T) {
	log := &mockArbitratorLog{
		state:     StateDefault,
		newStates: make(chan ArbitratorState, 5),
	}

	chanArbCtx, err := createTestChannelArbitrator(t, log)
	if err != nil {
		t.Fatalf("unable to create ChannelArbitrator: %v", err)
	}
	chanArb := chanArbCtx.chanArb

	if err := chanArb.Start(nil); err != nil {
		t.Fatalf("unable to start ChannelArbitrator: %v", err)
	}
	defer chanArb.Stop()

	// It should start out in the default state.
	chanArbCtx.AssertState(StateDefault)

	// Return ErrDoubleSpend when attempting to publish the tx, simulating
	// that the remote commitment already spent the funding output.
	stateChan := make(chan ArbitratorState)
	chanArb.cfg.PublishTx = func(*wire.MsgTx, string) error {
		// When the force close tx is being broadcasted, check that the
		// state is correct at that point.
		select {
		case stateChan <- chanArb.state:
		case <-chanArb.quit:
			return fmt.Errorf("exiting")
		}
		return lnwallet.ErrDoubleSpend
	}

	errChan := make(chan error, 1)
	respChan := make(chan *wire.MsgTx, 1)

	// With the channel found, and the request crafted, we'll send over a
	// force close request to the arbitrator that watches this channel.
	chanArb.forceCloseReqs <- &forceCloseReq{
		errResp: errChan,
		closeTx: respChan,
	}

	// It should transition to StateBroadcastCommit.
	chanArbCtx.AssertStateTransitions(StateBroadcastCommit)

	// We expect it to be in state StateBroadcastCommit when publishing
	// the force close.
	select {
	case state := <-stateChan:
		if state != StateBroadcastCommit {
			t.Fatalf("state during PublishTx was %v", state)
		}
	case <-time.After(stateTimeout):
		t.Fatalf("no state update received")
	}

	// After broadcasting, transition should be to
	// StateCommitmentBroadcasted. The double spend error is treated as a
	// benign broadcast failure.
	chanArbCtx.AssertStateTransitions(StateCommitmentBroadcasted)

	// Wait for a response to the force close.
	select {
	case <-respChan:
	case <-time.After(defaultTimeout):
		t.Fatalf("no response received")
	}

	select {
	case err := <-errChan:
		if err != nil {
			t.Fatalf("error force closing channel: %v", err)
		}
	case <-time.After(defaultTimeout):
		t.Fatalf("no response received")
	}

	// The state should be StateCommitmentBroadcasted.
	chanArbCtx.AssertState(StateCommitmentBroadcasted)

	// Now notify about the _REMOTE_ commitment getting confirmed.
	commitSpend := &chainntnfs.SpendDetail{
		SpenderTxHash: &chainhash.Hash{},
	}
	uniClose := &lnwallet.UnilateralCloseSummary{
		SpendDetail:     commitSpend,
		HtlcResolutions: &lnwallet.HtlcResolutions{},
	}
	chanArb.cfg.ChainEvents.RemoteUnilateralClosure <- &RemoteUnilateralCloseInfo{
		UnilateralCloseSummary: uniClose,
	}

	// It should transition StateContractClosed -> StateFullyResolved.
	chanArbCtx.AssertStateTransitions(StateContractClosed, StateFullyResolved)

	// It should resolve.
	select {
	case <-chanArbCtx.resolvedChan:
		// Expected.
	case <-time.After(stateTimeout):
		t.Fatalf("contract was not resolved")
	}
}
  1302  
// TestChannelArbitratorPersistence tests that the ChannelArbitrator is able to
// keep advancing the state machine from various states after restart.
func TestChannelArbitratorPersistence(t *testing.T) {
	// Start out with a log that will fail writing the set of resolutions.
	log := &mockArbitratorLog{
		state:     StateDefault,
		newStates: make(chan ArbitratorState, 5),
		failLog:   true,
	}

	chanArbCtx, err := createTestChannelArbitrator(t, log)
	if err != nil {
		t.Fatalf("unable to create ChannelArbitrator: %v", err)
	}

	chanArb := chanArbCtx.chanArb
	if err := chanArb.Start(nil); err != nil {
		t.Fatalf("unable to start ChannelArbitrator: %v", err)
	}

	// It should start in StateDefault.
	chanArbCtx.AssertState(StateDefault)

	// Send a remote force close event.
	commitSpend := &chainntnfs.SpendDetail{
		SpenderTxHash: &chainhash.Hash{},
	}

	uniClose := &lnwallet.UnilateralCloseSummary{
		SpendDetail:     commitSpend,
		HtlcResolutions: &lnwallet.HtlcResolutions{},
	}
	chanArb.cfg.ChainEvents.RemoteUnilateralClosure <- &RemoteUnilateralCloseInfo{
		UnilateralCloseSummary: uniClose,
	}

	// Since writing the resolutions fail, the arbitrator should not
	// advance to the next state.
	time.Sleep(100 * time.Millisecond)
	if log.state != StateDefault {
		t.Fatalf("expected to stay in StateDefault")
	}

	// Restart the channel arb, this'll use the same log and prior
	// context.
	chanArbCtx, err = chanArbCtx.Restart(nil)
	if err != nil {
		t.Fatalf("unable to restart channel arb: %v", err)
	}
	chanArb = chanArbCtx.chanArb

	// Again, it should start up in the default state.
	chanArbCtx.AssertState(StateDefault)

	// Now we make the log succeed writing the resolutions, but fail when
	// attempting to close the channel.
	log.failLog = false
	chanArb.cfg.MarkChannelClosed = func(*channeldb.ChannelCloseSummary,
		...channeldb.ChannelStatus) error {

		return fmt.Errorf("intentional close error")
	}

	// Send a new remote force close event.
	chanArb.cfg.ChainEvents.RemoteUnilateralClosure <- &RemoteUnilateralCloseInfo{
		UnilateralCloseSummary: uniClose,
	}

	// Since closing the channel failed, the arbitrator should stay in the
	// default state.
	time.Sleep(100 * time.Millisecond)
	if log.state != StateDefault {
		t.Fatalf("expected to stay in StateDefault")
	}

	// Restart once again to simulate yet another restart.
	chanArbCtx, err = chanArbCtx.Restart(nil)
	if err != nil {
		t.Fatalf("unable to restart channel arb: %v", err)
	}
	chanArb = chanArbCtx.chanArb

	// Starts out in StateDefault.
	chanArbCtx.AssertState(StateDefault)

	// Now make fetching the resolutions fail.
	log.failFetch = fmt.Errorf("intentional fetch failure")
	chanArb.cfg.ChainEvents.RemoteUnilateralClosure <- &RemoteUnilateralCloseInfo{
		UnilateralCloseSummary: uniClose,
	}

	// Since logging the resolutions and closing the channel now succeeds,
	// it should advance to StateContractClosed.
	chanArbCtx.AssertStateTransitions(StateContractClosed)

	// It should not advance further, however, as fetching resolutions
	// failed.
	time.Sleep(100 * time.Millisecond)
	if log.state != StateContractClosed {
		t.Fatalf("expected to stay in StateContractClosed")
	}
	chanArb.Stop()

	// Create a new arbitrator, and now make fetching resolutions succeed.
	log.failFetch = nil
	chanArbCtx, err = chanArbCtx.Restart(nil)
	if err != nil {
		t.Fatalf("unable to restart channel arb: %v", err)
	}
	defer chanArbCtx.CleanUp()

	// Finally it should advance to StateFullyResolved.
	chanArbCtx.AssertStateTransitions(StateFullyResolved)

	// It should also mark the channel as resolved.
	select {
	case <-chanArbCtx.resolvedChan:
		// Expected.
	case <-time.After(defaultTimeout):
		t.Fatalf("contract was not resolved")
	}
}
  1425  
// TestChannelArbitratorForceCloseBreachedChannel tests that the channel
// arbitrator is able to handle the case where a channel in the process of
// being force closed is breached by the remote node. In these cases we expect
// the ChannelArbitrator to properly execute the breachResolver flow and then
// gracefully exit once the breachResolver receives the signal from what would
// normally be the breacharbiter.
func TestChannelArbitratorForceCloseBreachedChannel(t *testing.T) {
	log := &mockArbitratorLog{
		state:     StateDefault,
		newStates: make(chan ArbitratorState, 5),
		resolvers: make(map[ContractResolver]struct{}),
	}

	chanArbCtx, err := createTestChannelArbitrator(t, log)
	if err != nil {
		t.Fatalf("unable to create ChannelArbitrator: %v", err)
	}

	chanArb := chanArbCtx.chanArb
	if err := chanArb.Start(nil); err != nil {
		t.Fatalf("unable to start ChannelArbitrator: %v", err)
	}

	// It should start in StateDefault.
	chanArbCtx.AssertState(StateDefault)

	// We start by attempting a local force close. We'll return an
	// unexpected publication error, causing the state machine to halt.
	expErr := errors.New("intentional publication error")
	stateChan := make(chan ArbitratorState)
	chanArb.cfg.PublishTx = func(*wire.MsgTx, string) error {
		// When the force close tx is being broadcasted, check that the
		// state is correct at that point.
		select {
		case stateChan <- chanArb.state:
		case <-chanArb.quit:
			return fmt.Errorf("exiting")
		}
		return expErr
	}

	errChan := make(chan error, 1)
	respChan := make(chan *wire.MsgTx, 1)

	// With the channel found, and the request crafted, we'll send over a
	// force close request to the arbitrator that watches this channel.
	chanArb.forceCloseReqs <- &forceCloseReq{
		errResp: errChan,
		closeTx: respChan,
	}

	// It should transition to StateBroadcastCommit.
	chanArbCtx.AssertStateTransitions(StateBroadcastCommit)

	// We expect it to be in state StateBroadcastCommit when attempting
	// the force close.
	select {
	case state := <-stateChan:
		if state != StateBroadcastCommit {
			t.Fatalf("state during PublishTx was %v", state)
		}
	case <-time.After(stateTimeout):
		t.Fatalf("no state update received")
	}

	// Make sure we get the expected error.
	select {
	case err := <-errChan:
		if err != expErr {
			t.Fatalf("unexpected error force closing channel: %v",
				err)
		}
	case <-time.After(defaultTimeout):
		t.Fatalf("no response received")
	}

	// Before restarting, we'll need to modify the arbitrator log to have
	// a set of contract resolutions and a commit set.
	log.resolutions = &ContractResolutions{
		BreachResolution: &BreachResolution{
			FundingOutPoint: wire.OutPoint{},
		},
	}
	log.commitSet = &CommitSet{
		ConfCommitKey: &RemoteHtlcSet,
		HtlcSets: map[HtlcSetKey][]channeldb.HTLC{
			RemoteHtlcSet: {},
		},
	}

	// We mimic that the channel is breached while the channel arbitrator
	// is down. This means that on restart it will be started with a
	// pending close channel, of type BreachClose.
	chanArbCtx, err = chanArbCtx.Restart(func(c *chanArbTestCtx) {
		c.chanArb.cfg.IsPendingClose = true
		c.chanArb.cfg.ClosingHeight = 100
		c.chanArb.cfg.CloseType = channeldb.BreachClose
	})
	if err != nil {
		t.Fatalf("unable to create ChannelArbitrator: %v", err)
	}
	defer chanArbCtx.CleanUp()

	// We should transition to StateContractClosed.
	chanArbCtx.AssertStateTransitions(
		StateContractClosed, StateWaitingFullResolution,
	)

	// Wait for SubscribeBreachComplete to be called.
	<-chanArbCtx.breachSubscribed

	// We'll close the breachResolutionChan to cleanup the breachResolver
	// and make the state transition to StateFullyResolved.
	close(chanArbCtx.breachResolutionChan)

	chanArbCtx.AssertStateTransitions(StateFullyResolved)

	// It should also mark the channel as resolved.
	select {
	case <-chanArbCtx.resolvedChan:
		// Expected.
	case <-time.After(defaultTimeout):
		t.Fatalf("contract was not resolved")
	}
}
  1551  
// TestChannelArbitratorCommitFailure tests that the channel arbitrator is able
// to recover from a failed CommitState call at restart.
func TestChannelArbitratorCommitFailure(t *testing.T) {

	testCases := []struct {

		// closeType is the type of channel close we want to test.
		closeType channeldb.ClosureType

		// sendEvent is a function that will send the event
		// corresponding to this test's closeType to the passed
		// ChannelArbitrator.
		sendEvent func(chanArb *ChannelArbitrator)

		// expectedStates is the states we expect the state machine to
		// go through after a restart and successful log commit.
		expectedStates []ArbitratorState
	}{
		{
			closeType: channeldb.CooperativeClose,
			sendEvent: func(chanArb *ChannelArbitrator) {
				closeInfo := &CooperativeCloseInfo{
					&channeldb.ChannelCloseSummary{},
				}
				chanArb.cfg.ChainEvents.CooperativeClosure <- closeInfo
			},
			expectedStates: []ArbitratorState{StateFullyResolved},
		},
		{
			closeType: channeldb.RemoteForceClose,
			sendEvent: func(chanArb *ChannelArbitrator) {
				commitSpend := &chainntnfs.SpendDetail{
					SpenderTxHash: &chainhash.Hash{},
				}

				uniClose := &lnwallet.UnilateralCloseSummary{
					SpendDetail:     commitSpend,
					HtlcResolutions: &lnwallet.HtlcResolutions{},
				}
				chanArb.cfg.ChainEvents.RemoteUnilateralClosure <- &RemoteUnilateralCloseInfo{
					UnilateralCloseSummary: uniClose,
				}
			},
			expectedStates: []ArbitratorState{StateContractClosed, StateFullyResolved},
		},
		{
			closeType: channeldb.LocalForceClose,
			sendEvent: func(chanArb *ChannelArbitrator) {
				chanArb.cfg.ChainEvents.LocalUnilateralClosure <- &LocalUnilateralCloseInfo{
					SpendDetail: &chainntnfs.SpendDetail{},
					LocalForceCloseSummary: &lnwallet.LocalForceCloseSummary{
						CloseTx:         &wire.MsgTx{},
						HtlcResolutions: &lnwallet.HtlcResolutions{},
					},
					ChannelCloseSummary: &channeldb.ChannelCloseSummary{},
				}
			},
			expectedStates: []ArbitratorState{StateContractClosed, StateFullyResolved},
		},
	}

	for _, test := range testCases {
		test := test

		log := &mockArbitratorLog{
			state:      StateDefault,
			newStates:  make(chan ArbitratorState, 5),
			failCommit: true,

			// Set the log to fail on the first expected state
			// after state machine progress for this test case.
			failCommitState: test.expectedStates[0],
		}

		chanArbCtx, err := createTestChannelArbitrator(t, log)
		if err != nil {
			t.Fatalf("unable to create ChannelArbitrator: %v", err)
		}

		chanArb := chanArbCtx.chanArb
		if err := chanArb.Start(nil); err != nil {
			t.Fatalf("unable to start ChannelArbitrator: %v", err)
		}

		// It should start in StateDefault.
		chanArbCtx.AssertState(StateDefault)

		closed := make(chan struct{})
		chanArb.cfg.MarkChannelClosed = func(
			*channeldb.ChannelCloseSummary,
			...channeldb.ChannelStatus) error {
			close(closed)
			return nil
		}

		// Send the test event to trigger the state machine.
		test.sendEvent(chanArb)

		select {
		case <-closed:
		case <-time.After(defaultTimeout):
			t.Fatalf("channel was not marked closed")
		}

		// Since the channel was marked closed in the database, but the
		// commit to the next state failed, the state should still be
		// StateDefault.
		time.Sleep(100 * time.Millisecond)
		if log.state != StateDefault {
			t.Fatalf("expected to stay in StateDefault, instead "+
				"has %v", log.state)
		}
		chanArb.Stop()

		// Start the arbitrator again, with IsPendingClose reporting
		// the channel closed in the database.
		log.failCommit = false
		chanArbCtx, err = chanArbCtx.Restart(func(c *chanArbTestCtx) {
			c.chanArb.cfg.IsPendingClose = true
			c.chanArb.cfg.ClosingHeight = 100
			c.chanArb.cfg.CloseType = test.closeType
		})
		if err != nil {
			t.Fatalf("unable to create ChannelArbitrator: %v", err)
		}

		// Since the channel is marked closed in the database, it
		// should advance to the expected states.
		chanArbCtx.AssertStateTransitions(test.expectedStates...)

		// It should also mark the channel as resolved.
		select {
		case <-chanArbCtx.resolvedChan:
			// Expected.
		case <-time.After(defaultTimeout):
			t.Fatalf("contract was not resolved")
		}
	}
}
  1691  
  1692  // TestChannelArbitratorEmptyResolutions makes sure that a channel that is
  1693  // pending close in the database, but haven't had any resolutions logged will
  1694  // not be marked resolved. This situation must be handled to avoid closing
  1695  // channels from earlier versions of the ChannelArbitrator, which didn't have a
  1696  // proper handoff from the ChainWatcher, and we could risk ending up in a state
  1697  // where the channel was closed in the DB, but the resolutions weren't properly
  1698  // written.
  1699  func TestChannelArbitratorEmptyResolutions(t *testing.T) {
  1700  	// Start out with a log that will fail writing the set of resolutions.
  1701  	log := &mockArbitratorLog{
  1702  		state:     StateDefault,
  1703  		newStates: make(chan ArbitratorState, 5),
  1704  		failFetch: errNoResolutions,
  1705  	}
  1706  
  1707  	chanArbCtx, err := createTestChannelArbitrator(t, log)
  1708  	if err != nil {
  1709  		t.Fatalf("unable to create ChannelArbitrator: %v", err)
  1710  	}
  1711  
  1712  	chanArb := chanArbCtx.chanArb
  1713  	chanArb.cfg.IsPendingClose = true
  1714  	chanArb.cfg.ClosingHeight = 100
  1715  	chanArb.cfg.CloseType = channeldb.RemoteForceClose
  1716  
  1717  	if err := chanArb.Start(nil); err != nil {
  1718  		t.Fatalf("unable to start ChannelArbitrator: %v", err)
  1719  	}
  1720  
  1721  	// It should not advance its state beyond StateContractClosed, since
  1722  	// fetching resolutions fails.
  1723  	chanArbCtx.AssertStateTransitions(StateContractClosed)
  1724  
  1725  	// It should not advance further, however, as fetching resolutions
  1726  	// failed.
  1727  	time.Sleep(100 * time.Millisecond)
  1728  	if log.state != StateContractClosed {
  1729  		t.Fatalf("expected to stay in StateContractClosed")
  1730  	}
  1731  	chanArb.Stop()
  1732  }
  1733  
  1734  // TestChannelArbitratorAlreadyForceClosed ensures that we cannot force close a
  1735  // channel that is already in the process of doing so.
  1736  func TestChannelArbitratorAlreadyForceClosed(t *testing.T) {
  1737  	t.Parallel()
  1738  
  1739  	// We'll create the arbitrator and its backing log to signal that it's
  1740  	// already in the process of being force closed.
  1741  	log := &mockArbitratorLog{
  1742  		state: StateCommitmentBroadcasted,
  1743  	}
  1744  	chanArbCtx, err := createTestChannelArbitrator(t, log)
  1745  	if err != nil {
  1746  		t.Fatalf("unable to create ChannelArbitrator: %v", err)
  1747  	}
  1748  	chanArb := chanArbCtx.chanArb
  1749  	if err := chanArb.Start(nil); err != nil {
  1750  		t.Fatalf("unable to start ChannelArbitrator: %v", err)
  1751  	}
  1752  	defer chanArb.Stop()
  1753  
  1754  	// Then, we'll create a request to signal a force close request to the
  1755  	// channel arbitrator.
  1756  	errChan := make(chan error, 1)
  1757  	respChan := make(chan *wire.MsgTx, 1)
  1758  
  1759  	select {
  1760  	case chanArb.forceCloseReqs <- &forceCloseReq{
  1761  		closeTx: respChan,
  1762  		errResp: errChan,
  1763  	}:
  1764  	case <-chanArb.quit:
  1765  	}
  1766  
  1767  	// Finally, we should ensure that we are not able to do so by seeing
  1768  	// the expected errAlreadyForceClosed error.
  1769  	select {
  1770  	case err = <-errChan:
  1771  		if err != errAlreadyForceClosed {
  1772  			t.Fatalf("expected errAlreadyForceClosed, got %v", err)
  1773  		}
  1774  	case <-time.After(time.Second):
  1775  		t.Fatal("expected to receive error response")
  1776  	}
  1777  }
  1778  
// TestChannelArbitratorDanglingCommitForceClose tests that if there're HTLCs
// on the remote party's commitment, but not ours, and they're about to time
// out, then we'll go on chain so we can cancel back the HTLCs on the incoming
// commitment.
func TestChannelArbitratorDanglingCommitForceClose(t *testing.T) {
	t.Parallel()

	// Each test case varies whether the HTLC has expired, whether it
	// lives on the remote pending commitment (vs the regular remote
	// commitment), and which commitment ultimately confirms on chain.
	type testCase struct {
		htlcExpired       bool
		remotePendingHTLC bool
		confCommit        HtlcSetKey
	}
	var testCases []testCase

	// Enumerate all combinations, skipping the two "normal" cases below
	// where the confirmed commitment is exactly the one carrying the
	// HTLC, as no dangling-commit handling applies there.
	testOptions := []bool{true, false}
	confOptions := []HtlcSetKey{
		LocalHtlcSet, RemoteHtlcSet, RemotePendingHtlcSet,
	}
	for _, htlcExpired := range testOptions {
		for _, remotePendingHTLC := range testOptions {
			for _, commitConf := range confOptions {
				switch {
				// If the HTLC is on the remote commitment, and
				// that one confirms, then there's no special
				// behavior, we should play all the HTLCs on
				// that remote commitment as normal.
				case !remotePendingHTLC && commitConf == RemoteHtlcSet:
					fallthrough

				// If the HTLC is on the remote pending, and
				// that confirms, then we don't have any
				// special actions.
				case remotePendingHTLC && commitConf == RemotePendingHtlcSet:
					continue
				}

				testCases = append(testCases, testCase{
					htlcExpired:       htlcExpired,
					remotePendingHTLC: remotePendingHTLC,
					confCommit:        commitConf,
				})
			}
		}
	}

	for _, testCase := range testCases {
		testCase := testCase
		testName := fmt.Sprintf("testCase: htlcExpired=%v,"+
			"remotePendingHTLC=%v,remotePendingCommitConf=%v",
			testCase.htlcExpired, testCase.remotePendingHTLC,
			testCase.confCommit)

		t.Run(testName, func(t *testing.T) {
			t.Parallel()

			arbLog := &mockArbitratorLog{
				state:     StateDefault,
				newStates: make(chan ArbitratorState, 5),
				resolvers: make(map[ContractResolver]struct{}),
			}

			chanArbCtx, err := createTestChannelArbitrator(
				t, arbLog,
			)
			if err != nil {
				t.Fatalf("unable to create ChannelArbitrator: %v", err)
			}
			chanArb := chanArbCtx.chanArb
			if err := chanArb.Start(nil); err != nil {
				t.Fatalf("unable to start ChannelArbitrator: %v", err)
			}
			defer chanArb.Stop()

			// Now that our channel arb has started, we'll set up
			// its contract signals channel so we can send it
			// various HTLC updates for this test.
			htlcUpdates := make(chan *ContractUpdate)
			signals := &ContractSignals{
				HtlcUpdates: htlcUpdates,
				ShortChanID: lnwire.ShortChannelID{},
			}
			chanArb.UpdateContractSignals(signals)

			// Pick which remote HTLC set this case places the
			// dangling HTLC in.
			htlcKey := RemoteHtlcSet
			if testCase.remotePendingHTLC {
				htlcKey = RemotePendingHtlcSet
			}

			// Next, we'll send it a new HTLC that is set to expire
			// in 10 blocks, this HTLC will only appear on the
			// commitment transaction of the _remote_ party.
			htlcIndex := uint64(99)
			htlcExpiry := uint32(10)
			danglingHTLC := channeldb.HTLC{
				Incoming:      false,
				Amt:           10000,
				HtlcIndex:     htlcIndex,
				RefundTimeout: htlcExpiry,
			}
			htlcUpdates <- &ContractUpdate{
				HtlcKey: htlcKey,
				Htlcs:   []channeldb.HTLC{danglingHTLC},
			}

			// At this point, we now have a split commitment state
			// from the PoV of the channel arb. There's now an HTLC
			// that only exists on the commitment transaction of
			// the remote party.
			errChan := make(chan error, 1)
			respChan := make(chan *wire.MsgTx, 1)
			switch {
			// If we want an HTLC expiration trigger, then we'll
			// now mine a block (height 5), which is 5 blocks away
			// (our grace delta) from the expiry of that HTLC.
			case testCase.htlcExpired:
				chanArbCtx.chanArb.blocks <- 5

			// Otherwise, we'll just trigger a regular force close
			// request.
			case !testCase.htlcExpired:
				chanArb.forceCloseReqs <- &forceCloseReq{
					errResp: errChan,
					closeTx: respChan,
				}

			}

			// At this point, the resolver should now have
			// determined that it needs to go to chain in order to
			// block off the redemption path so it can cancel the
			// incoming HTLC.
			chanArbCtx.AssertStateTransitions(
				StateBroadcastCommit,
				StateCommitmentBroadcasted,
			)

			// Next we'll craft a fake commitment transaction to
			// send to signal that the channel has closed out on
			// chain.
			closeTx := &wire.MsgTx{
				TxIn: []*wire.TxIn{
					{
						PreviousOutPoint: wire.OutPoint{},
						SignatureScript: []byte{
							0x9,
						},
					},
				},
			}

			// We'll now signal to the channel arb that the HTLC
			// has fully closed on chain. Our local commit set
			// shows no HTLC on our commitment, but one on the
			// remote commitment. This should result in the HTLC
			// being cancelled back. Also note that there're no
			// HTLC resolutions sent since we have none on our
			// commitment transaction.
			uniCloseInfo := &LocalUnilateralCloseInfo{
				SpendDetail: &chainntnfs.SpendDetail{},
				LocalForceCloseSummary: &lnwallet.LocalForceCloseSummary{
					CloseTx:         closeTx,
					HtlcResolutions: &lnwallet.HtlcResolutions{},
				},
				ChannelCloseSummary: &channeldb.ChannelCloseSummary{},
				CommitSet: CommitSet{
					ConfCommitKey: &testCase.confCommit,
					HtlcSets:      make(map[HtlcSetKey][]channeldb.HTLC),
				},
			}

			// If the HTLC was meant to expire, then we'll mark the
			// closing transaction at the proper expiry height
			// since our "need to timeout" comparison is based on
			// the confirmation height.
			if testCase.htlcExpired {
				uniCloseInfo.SpendDetail.SpendingHeight = 5
			}

			// Depending on if we're testing the remote pending
			// commitment or not, we'll populate either a fake
			// dangling remote commitment, or a regular locked in
			// one.
			htlcs := []channeldb.HTLC{danglingHTLC}
			if testCase.remotePendingHTLC {
				uniCloseInfo.CommitSet.HtlcSets[RemotePendingHtlcSet] = htlcs
			} else {
				uniCloseInfo.CommitSet.HtlcSets[RemoteHtlcSet] = htlcs
			}

			chanArb.cfg.ChainEvents.LocalUnilateralClosure <- uniCloseInfo

			// The channel arb should now transition to waiting
			// until the HTLCs have been fully resolved.
			chanArbCtx.AssertStateTransitions(
				StateContractClosed,
				StateWaitingFullResolution,
			)

			// Now that we've sent this signal, we should have that
			// HTLC be canceled back immediately.
			select {
			case msgs := <-chanArbCtx.resolutions:
				if len(msgs) != 1 {
					t.Fatalf("expected 1 message, "+
						"instead got %v", len(msgs))
				}

				if msgs[0].HtlcIndex != htlcIndex {
					t.Fatalf("wrong htlc index: expected %v, got %v",
						htlcIndex, msgs[0].HtlcIndex)
				}
			case <-time.After(defaultTimeout):
				t.Fatalf("resolution msgs not sent")
			}

			// There's no contract to send a fully resolve message,
			// so instead, we'll mine another block which'll cause
			// it to re-examine its state and realize there're no
			// more HTLCs.
			chanArbCtx.chanArb.blocks <- 6
			chanArbCtx.AssertStateTransitions(StateFullyResolved)
		})
	}
}
  2003  
  2004  // TestChannelArbitratorPendingExpiredHTLC tests that if we have pending htlc
  2005  // that is expired we will only go to chain if we are running at least the
  2006  // time defined in PaymentsExpirationGracePeriod.
  2007  // During this time the remote party is expected to send his updates and cancel
  2008  // The htlc.
  2009  func TestChannelArbitratorPendingExpiredHTLC(t *testing.T) {
  2010  	t.Parallel()
  2011  
  2012  	// We'll create the arbitrator and its backing log in a default state.
  2013  	log := &mockArbitratorLog{
  2014  		state:     StateDefault,
  2015  		newStates: make(chan ArbitratorState, 5),
  2016  		resolvers: make(map[ContractResolver]struct{}),
  2017  	}
  2018  	chanArbCtx, err := createTestChannelArbitrator(t, log)
  2019  	if err != nil {
  2020  		t.Fatalf("unable to create ChannelArbitrator: %v", err)
  2021  	}
  2022  	chanArb := chanArbCtx.chanArb
  2023  
  2024  	// We'll inject a test clock implementation so we can control the uptime.
  2025  	startTime := time.Date(2020, time.February, 3, 13, 0, 0, 0, time.UTC)
  2026  	testClock := clock.NewTestClock(startTime)
  2027  	chanArb.cfg.Clock = testClock
  2028  
  2029  	// We also configure the grace period and the IsForwardedHTLC to identify
  2030  	// the htlc as our initiated payment.
  2031  	chanArb.cfg.PaymentsExpirationGracePeriod = time.Second * 15
  2032  	chanArb.cfg.IsForwardedHTLC = func(chanID lnwire.ShortChannelID,
  2033  		htlcIndex uint64) bool {
  2034  
  2035  		return false
  2036  	}
  2037  
  2038  	if err := chanArb.Start(nil); err != nil {
  2039  		t.Fatalf("unable to start ChannelArbitrator: %v", err)
  2040  	}
  2041  	defer func() {
  2042  		if err := chanArb.Stop(); err != nil {
  2043  			t.Fatalf("unable to stop chan arb: %v", err)
  2044  		}
  2045  	}()
  2046  
  2047  	// Now that our channel arb has started, we'll set up
  2048  	// its contract signals channel so we can send it
  2049  	// various HTLC updates for this test.
  2050  	htlcUpdates := make(chan *ContractUpdate)
  2051  	signals := &ContractSignals{
  2052  		HtlcUpdates: htlcUpdates,
  2053  		ShortChanID: lnwire.ShortChannelID{},
  2054  	}
  2055  	chanArb.UpdateContractSignals(signals)
  2056  
  2057  	// Next, we'll send it a new HTLC that is set to expire
  2058  	// in 10 blocks.
  2059  	htlcIndex := uint64(99)
  2060  	htlcExpiry := uint32(10)
  2061  	pendingHTLC := channeldb.HTLC{
  2062  		Incoming:      false,
  2063  		Amt:           10000,
  2064  		HtlcIndex:     htlcIndex,
  2065  		RefundTimeout: htlcExpiry,
  2066  	}
  2067  	htlcUpdates <- &ContractUpdate{
  2068  		HtlcKey: RemoteHtlcSet,
  2069  		Htlcs:   []channeldb.HTLC{pendingHTLC},
  2070  	}
  2071  
  2072  	// We will advance the uptime to 10 seconds which should be still within
  2073  	// the grace period and should not trigger going to chain.
  2074  	testClock.SetTime(startTime.Add(time.Second * 10))
  2075  	chanArbCtx.chanArb.blocks <- 5
  2076  	chanArbCtx.AssertState(StateDefault)
  2077  
  2078  	// We will advance the uptime to 16 seconds which should trigger going
  2079  	// to chain.
  2080  	testClock.SetTime(startTime.Add(time.Second * 16))
  2081  	chanArbCtx.chanArb.blocks <- 6
  2082  	chanArbCtx.AssertStateTransitions(
  2083  		StateBroadcastCommit,
  2084  		StateCommitmentBroadcasted,
  2085  	)
  2086  }
  2087  
// TestRemoteCloseInitiator tests the setting of close initiator statuses
// for remote force closes and breaches.
func TestRemoteCloseInitiator(t *testing.T) {
	// getCloseSummary returns a unilateral close summary for the channel
	// provided.
	getCloseSummary := func(channel *channeldb.OpenChannel) *RemoteUnilateralCloseInfo {
		return &RemoteUnilateralCloseInfo{
			UnilateralCloseSummary: &lnwallet.UnilateralCloseSummary{
				SpendDetail: &chainntnfs.SpendDetail{
					SpenderTxHash: &chainhash.Hash{},
					SpendingTx: &wire.MsgTx{
						TxIn:  []*wire.TxIn{},
						TxOut: []*wire.TxOut{},
					},
				},
				ChannelCloseSummary: channeldb.ChannelCloseSummary{
					ChanPoint:         channel.FundingOutpoint,
					RemotePub:         channel.IdentityPub,
					SettledBalance:    dcrutil.Amount(500),
					TimeLockedBalance: dcrutil.Amount(10000),
					IsPending:         false,
				},
				HtlcResolutions: &lnwallet.HtlcResolutions{},
			},
		}
	}

	tests := []struct {
		name string

		// notifyClose sends the appropriate chain event to indicate
		// that the channel has closed. The event subscription channel
		// is expected to be buffered, as is the default for test
		// channel arbitrators.
		notifyClose func(sub *ChainEventSubscription,
			channel *channeldb.OpenChannel)

		// expectedStates is the set of states we expect the arbitrator
		// to progress through.
		expectedStates []ArbitratorState
	}{
		{
			name: "force close",
			notifyClose: func(sub *ChainEventSubscription,
				channel *channeldb.OpenChannel) {

				s := getCloseSummary(channel)
				sub.RemoteUnilateralClosure <- s
			},
			expectedStates: []ArbitratorState{
				StateContractClosed, StateFullyResolved,
			},
		},
	}

	for _, test := range tests {
		test := test

		t.Run(test.name, func(t *testing.T) {
			t.Parallel()

			// First, create alice's channel.
			alice, _, cleanUp, err := lnwallet.CreateTestChannels(
				channeldb.SingleFunderTweaklessBit,
			)
			if err != nil {
				t.Fatalf("unable to create test channels: %v",
					err)
			}
			defer cleanUp()

			// Create a mock log which will not block the test's
			// expected number of transitions, and has no commit
			// resolutions so that the channel will resolve
			// immediately.
			log := &mockArbitratorLog{
				state: StateDefault,
				newStates: make(chan ArbitratorState,
					len(test.expectedStates)),
				resolutions: &ContractResolutions{
					CommitHash:       chainhash.Hash{},
					CommitResolution: nil,
				},
			}

			// Mock marking the channel as closed, we only care
			// about setting of channel status.
			mockMarkClosed := func(_ *channeldb.ChannelCloseSummary,
				statuses ...channeldb.ChannelStatus) error {
				for _, status := range statuses {
					err := alice.State().ApplyChanStatus(status)
					if err != nil {
						return err
					}
				}
				return nil
			}

			chanArbCtx, err := createTestChannelArbitrator(
				t, log, withMarkClosed(mockMarkClosed),
			)
			if err != nil {
				t.Fatalf("unable to create "+
					"ChannelArbitrator: %v", err)
			}
			chanArb := chanArbCtx.chanArb

			if err := chanArb.Start(nil); err != nil {
				t.Fatalf("unable to start "+
					"ChannelArbitrator: %v", err)
			}
			defer func() {
				if err := chanArb.Stop(); err != nil {
					t.Fatal(err)
				}
			}()

			// It should start out in the default state.
			chanArbCtx.AssertState(StateDefault)

			// Notify the close event.
			test.notifyClose(chanArb.cfg.ChainEvents, alice.State())

			// Check that the channel transitions as expected.
			chanArbCtx.AssertStateTransitions(
				test.expectedStates...,
			)

			// It should also mark the channel as resolved.
			select {
			case <-chanArbCtx.resolvedChan:
				// Expected.
			case <-time.After(defaultTimeout):
				t.Fatalf("contract was not resolved")
			}

			// Check that alice has the status we expect.
			if !alice.State().HasChanStatus(
				channeldb.ChanStatusRemoteCloseInitiator,
			) {
				t.Fatalf("expected remote close initiator, "+
					"got: %v", alice.State().ChanStatus())
			}
		})
	}
}
  2234  
  2235  // TestFindCommitmentDeadline tests the logic used to determine confirmation
  2236  // deadline is implemented as expected.
  2237  func TestFindCommitmentDeadline(t *testing.T) {
  2238  	// Create a testing channel arbitrator.
  2239  	log := &mockArbitratorLog{
  2240  		state:     StateDefault,
  2241  		newStates: make(chan ArbitratorState, 5),
  2242  	}
  2243  	chanArbCtx, err := createTestChannelArbitrator(t, log)
  2244  	require.NoError(t, err, "unable to create ChannelArbitrator")
  2245  
  2246  	// Add a dummy payment hash to the preimage lookup.
  2247  	rHash := [lntypes.PreimageSize]byte{1, 2, 3}
  2248  	mockPreimageDB := newMockWitnessBeacon()
  2249  	mockPreimageDB.lookupPreimage[rHash] = rHash
  2250  
  2251  	// Attack a mock PreimageDB and Registry to channel arbitrator.
  2252  	chanArb := chanArbCtx.chanArb
  2253  	chanArb.cfg.PreimageDB = mockPreimageDB
  2254  	chanArb.cfg.Registry = &mockRegistry{}
  2255  
  2256  	htlcIndexBase := uint64(99)
  2257  	heightHint := uint32(1000)
  2258  	htlcExpiryBase := heightHint + uint32(10)
  2259  
  2260  	// Create four testing HTLCs.
  2261  	htlcDust := channeldb.HTLC{
  2262  		HtlcIndex:     htlcIndexBase + 1,
  2263  		RefundTimeout: htlcExpiryBase + 1,
  2264  		OutputIndex:   -1,
  2265  	}
  2266  	htlcSmallExipry := channeldb.HTLC{
  2267  		HtlcIndex:     htlcIndexBase + 2,
  2268  		RefundTimeout: htlcExpiryBase + 2,
  2269  	}
  2270  
  2271  	htlcPreimage := channeldb.HTLC{
  2272  		HtlcIndex:     htlcIndexBase + 3,
  2273  		RefundTimeout: htlcExpiryBase + 3,
  2274  		RHash:         rHash,
  2275  	}
  2276  	htlcLargeExpiry := channeldb.HTLC{
  2277  		HtlcIndex:     htlcIndexBase + 4,
  2278  		RefundTimeout: htlcExpiryBase + 100,
  2279  	}
  2280  	htlcExpired := channeldb.HTLC{
  2281  		HtlcIndex:     htlcIndexBase + 5,
  2282  		RefundTimeout: heightHint,
  2283  	}
  2284  
  2285  	makeHTLCSet := func(incoming, outgoing channeldb.HTLC) htlcSet {
  2286  		return htlcSet{
  2287  			incomingHTLCs: map[uint64]channeldb.HTLC{
  2288  				incoming.HtlcIndex: incoming,
  2289  			},
  2290  			outgoingHTLCs: map[uint64]channeldb.HTLC{
  2291  				outgoing.HtlcIndex: outgoing,
  2292  			},
  2293  		}
  2294  	}
  2295  
  2296  	testCases := []struct {
  2297  		name     string
  2298  		htlcs    htlcSet
  2299  		err      error
  2300  		deadline uint32
  2301  	}{
  2302  		{
  2303  			// When we have no HTLCs, the default value should be
  2304  			// used.
  2305  			name:     "use default conf target",
  2306  			htlcs:    htlcSet{},
  2307  			err:      nil,
  2308  			deadline: anchorSweepConfTarget,
  2309  		},
  2310  		{
  2311  			// When we have a preimage available in the local HTLC
  2312  			// set, its CLTV should be used.
  2313  			name:     "use htlc with preimage available",
  2314  			htlcs:    makeHTLCSet(htlcPreimage, htlcLargeExpiry),
  2315  			err:      nil,
  2316  			deadline: htlcPreimage.RefundTimeout - heightHint,
  2317  		},
  2318  		{
  2319  			// When the HTLC in the local set is not preimage
  2320  			// available, we should not use its CLTV even its value
  2321  			// is smaller.
  2322  			name:     "use htlc with no preimage available",
  2323  			htlcs:    makeHTLCSet(htlcSmallExipry, htlcLargeExpiry),
  2324  			err:      nil,
  2325  			deadline: htlcLargeExpiry.RefundTimeout - heightHint,
  2326  		},
  2327  		{
  2328  			// When we have dust HTLCs, their CLTVs should NOT be
  2329  			// used even the values are smaller.
  2330  			name:     "ignore dust HTLCs",
  2331  			htlcs:    makeHTLCSet(htlcPreimage, htlcDust),
  2332  			err:      nil,
  2333  			deadline: htlcPreimage.RefundTimeout - heightHint,
  2334  		},
  2335  		{
  2336  			// When we've reached our deadline, use conf target of
  2337  			// 1 as our deadline.
  2338  			name:     "use conf target 1",
  2339  			htlcs:    makeHTLCSet(htlcPreimage, htlcExpired),
  2340  			err:      nil,
  2341  			deadline: 1,
  2342  		},
  2343  	}
  2344  
  2345  	for _, tc := range testCases {
  2346  		tc := tc
  2347  		t.Run(tc.name, func(t *testing.T) {
  2348  			t.Parallel()
  2349  			deadline, err := chanArb.findCommitmentDeadline(
  2350  				heightHint, tc.htlcs,
  2351  			)
  2352  
  2353  			require.Equal(t, tc.err, err)
  2354  			require.Equal(t, tc.deadline, deadline)
  2355  		})
  2356  	}
  2357  
  2358  }
  2359  
// TestSweepAnchors checks the sweep transactions are created using the
// expected deadlines for different anchor resolutions.
func TestSweepAnchors(t *testing.T) {
	// Create a testing channel arbitrator.
	log := &mockArbitratorLog{
		state:     StateDefault,
		newStates: make(chan ArbitratorState, 5),
	}
	chanArbCtx, err := createTestChannelArbitrator(t, log)
	require.NoError(t, err, "unable to create ChannelArbitrator")

	// Add a dummy payment hash to the preimage lookup so one HTLC can be
	// treated as preimage-available.
	rHash := [lntypes.PreimageSize]byte{1, 2, 3}
	mockPreimageDB := newMockWitnessBeacon()
	mockPreimageDB.lookupPreimage[rHash] = rHash

	// Attach a mock PreimageDB and Registry to the channel arbitrator.
	chanArb := chanArbCtx.chanArb
	chanArb.cfg.PreimageDB = mockPreimageDB
	chanArb.cfg.Registry = &mockRegistry{}

	// Set current block height.
	heightHint := uint32(1000)
	chanArbCtx.chanArb.blocks <- int32(heightHint)

	htlcIndexBase := uint64(99)
	htlcExpiryBase := heightHint + uint32(10)

	// Create three testing HTLCs: one dust (ignored for deadlines), one
	// preimage-available, and one plain HTLC with a small expiry.
	htlcDust := channeldb.HTLC{
		HtlcIndex:     htlcIndexBase + 1,
		RefundTimeout: htlcExpiryBase + 1,
		OutputIndex:   -1,
	}
	htlcWithPreimage := channeldb.HTLC{
		HtlcIndex:     htlcIndexBase + 2,
		RefundTimeout: htlcExpiryBase + 2,
		RHash:         rHash,
	}
	htlcSmallExipry := channeldb.HTLC{
		HtlcIndex:     htlcIndexBase + 3,
		RefundTimeout: htlcExpiryBase + 3,
	}

	// Setup our local HTLC set such that we will use the HTLC's CLTV from
	// the incoming HTLC set.
	expectedLocalDeadline := htlcWithPreimage.RefundTimeout - heightHint
	chanArb.activeHTLCs[LocalHtlcSet] = htlcSet{
		incomingHTLCs: map[uint64]channeldb.HTLC{
			htlcWithPreimage.HtlcIndex: htlcWithPreimage,
		},
		outgoingHTLCs: map[uint64]channeldb.HTLC{
			htlcDust.HtlcIndex: htlcDust,
		},
	}

	// Setup our remote HTLC set such that no valid HTLCs can be used, thus
	// we default to anchorSweepConfTarget.
	expectedRemoteDeadline := anchorSweepConfTarget
	chanArb.activeHTLCs[RemoteHtlcSet] = htlcSet{
		incomingHTLCs: map[uint64]channeldb.HTLC{
			htlcSmallExipry.HtlcIndex: htlcSmallExipry,
		},
		outgoingHTLCs: map[uint64]channeldb.HTLC{
			htlcDust.HtlcIndex: htlcDust,
		},
	}

	// Setup our pending remote HTLC set such that we will use the HTLC's
	// CLTV from the outgoing HTLC set.
	expectedPendingDeadline := htlcSmallExipry.RefundTimeout - heightHint
	chanArb.activeHTLCs[RemotePendingHtlcSet] = htlcSet{
		incomingHTLCs: map[uint64]channeldb.HTLC{
			htlcDust.HtlcIndex: htlcDust,
		},
		outgoingHTLCs: map[uint64]channeldb.HTLC{
			htlcSmallExipry.HtlcIndex: htlcSmallExipry,
		},
	}

	// Create AnchorResolutions, one for each commitment version.
	anchors := &lnwallet.AnchorResolutions{
		Local: &lnwallet.AnchorResolution{
			AnchorSignDescriptor: input.SignDescriptor{
				Output: &wire.TxOut{Value: 1},
			},
		},
		Remote: &lnwallet.AnchorResolution{
			AnchorSignDescriptor: input.SignDescriptor{
				Output: &wire.TxOut{Value: 1},
			},
		},
		RemotePending: &lnwallet.AnchorResolution{
			AnchorSignDescriptor: input.SignDescriptor{
				Output: &wire.TxOut{Value: 1},
			},
		},
	}

	// Sweep anchors and check there's no error.
	err = chanArb.sweepAnchors(anchors, heightHint)
	require.NoError(t, err)

	// Verify deadlines are used as expected.
	deadlines := chanArbCtx.sweeper.deadlines
	// Since there's no guarantee of the deadline orders, we sort it here
	// so they can be compared: after sorting the values are
	// [local, pending remote, remote] = [12, 13, 144].
	sort.Ints(deadlines) // [12, 13, 144]
	require.EqualValues(
		t, expectedLocalDeadline, deadlines[0],
		"local deadline not matched",
	)
	require.EqualValues(
		t, expectedPendingDeadline, deadlines[1],
		"pending remote deadline not matched",
	)
	require.EqualValues(
		t, expectedRemoteDeadline, deadlines[2],
		"remote deadline not matched",
	)

}
  2482  
  2483  // TestChannelArbitratorAnchors asserts that the commitment tx anchor is swept.
  2484  func TestChannelArbitratorAnchors(t *testing.T) {
  2485  	log := &mockArbitratorLog{
  2486  		state:     StateDefault,
  2487  		newStates: make(chan ArbitratorState, 5),
  2488  	}
  2489  
  2490  	chanArbCtx, err := createTestChannelArbitrator(t, log)
  2491  	if err != nil {
  2492  		t.Fatalf("unable to create ChannelArbitrator: %v", err)
  2493  	}
  2494  
  2495  	// Replace our mocked put report function with one which will push
  2496  	// reports into a channel for us to consume. We update this function
  2497  	// because our resolver will be created from the existing chanArb cfg.
  2498  	reports := make(chan *channeldb.ResolverReport)
  2499  	chanArbCtx.chanArb.cfg.PutResolverReport = putResolverReportInChannel(
  2500  		reports,
  2501  	)
  2502  
  2503  	// Add a dummy payment hash to the preimage lookup.
  2504  	rHash := [lntypes.PreimageSize]byte{1, 2, 3}
  2505  	mockPreimageDB := newMockWitnessBeacon()
  2506  	mockPreimageDB.lookupPreimage[rHash] = rHash
  2507  
  2508  	// Attack a mock PreimageDB and Registry to channel arbitrator.
  2509  	chanArb := chanArbCtx.chanArb
  2510  	chanArb.cfg.PreimageDB = mockPreimageDB
  2511  	chanArb.cfg.Registry = &mockRegistry{}
  2512  
  2513  	// Setup two pre-confirmation anchor resolutions on the mock channel.
  2514  	chanArb.cfg.Channel.(*mockChannel).anchorResolutions =
  2515  		&lnwallet.AnchorResolutions{
  2516  			Local: &lnwallet.AnchorResolution{
  2517  				AnchorSignDescriptor: input.SignDescriptor{
  2518  					Output: &wire.TxOut{Value: 1},
  2519  				},
  2520  			},
  2521  			Remote: &lnwallet.AnchorResolution{
  2522  				AnchorSignDescriptor: input.SignDescriptor{
  2523  					Output: &wire.TxOut{Value: 1},
  2524  				},
  2525  			},
  2526  		}
  2527  
  2528  	if err := chanArb.Start(nil); err != nil {
  2529  		t.Fatalf("unable to start ChannelArbitrator: %v", err)
  2530  	}
  2531  	defer func() {
  2532  		if err := chanArb.Stop(); err != nil {
  2533  			t.Fatal(err)
  2534  		}
  2535  	}()
  2536  
  2537  	// Create htlcUpdates channel.
  2538  	htlcUpdates := make(chan *ContractUpdate)
  2539  
  2540  	signals := &ContractSignals{
  2541  		HtlcUpdates: htlcUpdates,
  2542  		ShortChanID: lnwire.ShortChannelID{},
  2543  	}
  2544  	chanArb.UpdateContractSignals(signals)
  2545  
  2546  	// Set current block height.
  2547  	heightHint := uint32(1000)
  2548  	chanArbCtx.chanArb.blocks <- int32(heightHint)
  2549  
  2550  	// Create testing HTLCs.
  2551  	htlcExpiryBase := heightHint + uint32(10)
  2552  	htlcWithPreimage := channeldb.HTLC{
  2553  		HtlcIndex:     99,
  2554  		RefundTimeout: htlcExpiryBase + 2,
  2555  		RHash:         rHash,
  2556  		Incoming:      true,
  2557  	}
  2558  	htlc := channeldb.HTLC{
  2559  		HtlcIndex:     100,
  2560  		RefundTimeout: htlcExpiryBase + 3,
  2561  	}
  2562  
  2563  	// We now send two HTLC updates, one for local HTLC set and the other
  2564  	// for remote HTLC set.
  2565  	htlcUpdates <- &ContractUpdate{
  2566  		HtlcKey: LocalHtlcSet,
  2567  		// This will make the deadline of the local anchor resolution
  2568  		// to be htlcWithPreimage's CLTV minus heightHint since the
  2569  		// incoming HTLC (toLocalHTLCs) has a lower CLTV value and is
  2570  		// preimage available.
  2571  		Htlcs: []channeldb.HTLC{htlc, htlcWithPreimage},
  2572  	}
  2573  	htlcUpdates <- &ContractUpdate{
  2574  		HtlcKey: RemoteHtlcSet,
  2575  		// This will make the deadline of the remote anchor resolution
  2576  		// to be htlcWithPreimage's CLTV minus heightHint because the
  2577  		// incoming HTLC (toRemoteHTLCs) has a lower CLTV.
  2578  		Htlcs: []channeldb.HTLC{htlc, htlcWithPreimage},
  2579  	}
  2580  
  2581  	errChan := make(chan error, 1)
  2582  	respChan := make(chan *wire.MsgTx, 1)
  2583  
  2584  	// With the channel found, and the request crafted, we'll send over a
  2585  	// force close request to the arbitrator that watches this channel.
  2586  	chanArb.forceCloseReqs <- &forceCloseReq{
  2587  		errResp: errChan,
  2588  		closeTx: respChan,
  2589  	}
  2590  
  2591  	// The force close request should trigger broadcast of the commitment
  2592  	// transaction.
  2593  	chanArbCtx.AssertStateTransitions(
  2594  		StateBroadcastCommit,
  2595  		StateCommitmentBroadcasted,
  2596  	)
  2597  
  2598  	// With the commitment tx still unconfirmed, we expect sweep attempts
  2599  	// for all three versions of the commitment transaction.
  2600  	<-chanArbCtx.sweeper.sweptInputs
  2601  	<-chanArbCtx.sweeper.sweptInputs
  2602  
  2603  	select {
  2604  	case <-respChan:
  2605  	case <-time.After(5 * time.Second):
  2606  		t.Fatalf("no response received")
  2607  	}
  2608  
  2609  	select {
  2610  	case err := <-errChan:
  2611  		if err != nil {
  2612  			t.Fatalf("error force closing channel: %v", err)
  2613  		}
  2614  	case <-time.After(5 * time.Second):
  2615  		t.Fatalf("no response received")
  2616  	}
  2617  
  2618  	// Now notify about the local force close getting confirmed.
  2619  	closeTx := &wire.MsgTx{
  2620  		TxIn: []*wire.TxIn{
  2621  			{
  2622  				PreviousOutPoint: wire.OutPoint{},
  2623  				SignatureScript: []byte{
  2624  					0x1,
  2625  					0x2,
  2626  				},
  2627  			},
  2628  		},
  2629  	}
  2630  
  2631  	anchorResolution := &lnwallet.AnchorResolution{
  2632  		AnchorSignDescriptor: input.SignDescriptor{
  2633  			Output: &wire.TxOut{
  2634  				Value: 1,
  2635  			},
  2636  		},
  2637  	}
  2638  
  2639  	chanArb.cfg.ChainEvents.LocalUnilateralClosure <- &LocalUnilateralCloseInfo{
  2640  		SpendDetail: &chainntnfs.SpendDetail{},
  2641  		LocalForceCloseSummary: &lnwallet.LocalForceCloseSummary{
  2642  			CloseTx:          closeTx,
  2643  			HtlcResolutions:  &lnwallet.HtlcResolutions{},
  2644  			AnchorResolution: anchorResolution,
  2645  		},
  2646  		ChannelCloseSummary: &channeldb.ChannelCloseSummary{},
  2647  		CommitSet: CommitSet{
  2648  			ConfCommitKey: &LocalHtlcSet,
  2649  			HtlcSets:      map[HtlcSetKey][]channeldb.HTLC{},
  2650  		},
  2651  	}
  2652  
  2653  	chanArbCtx.AssertStateTransitions(
  2654  		StateContractClosed,
  2655  		StateWaitingFullResolution,
  2656  	)
  2657  
  2658  	// We expect to only have the anchor resolver active.
  2659  	if len(chanArb.activeResolvers) != 1 {
  2660  		t.Fatalf("expected single resolver, instead got: %v",
  2661  			len(chanArb.activeResolvers))
  2662  	}
  2663  
  2664  	resolver := chanArb.activeResolvers[0]
  2665  	_, ok := resolver.(*anchorResolver)
  2666  	if !ok {
  2667  		t.Fatalf("expected anchor resolver, got %T", resolver)
  2668  	}
  2669  
  2670  	// The anchor resolver is expected to re-offer the anchor input to the
  2671  	// sweeper.
  2672  	<-chanArbCtx.sweeper.sweptInputs
  2673  
  2674  	// The mock sweeper immediately signals success for that input. This
  2675  	// should transition the channel to the resolved state.
  2676  	chanArbCtx.AssertStateTransitions(StateFullyResolved)
  2677  	select {
  2678  	case <-chanArbCtx.resolvedChan:
  2679  	case <-time.After(5 * time.Second):
  2680  		t.Fatalf("contract was not resolved")
  2681  	}
  2682  
  2683  	anchorAmt := dcrutil.Amount(
  2684  		anchorResolution.AnchorSignDescriptor.Output.Value,
  2685  	)
  2686  	spendTx := chanArbCtx.sweeper.sweepTx.TxHash()
  2687  	expectedReport := &channeldb.ResolverReport{
  2688  		OutPoint:        anchorResolution.CommitAnchor,
  2689  		Amount:          anchorAmt,
  2690  		ResolverType:    channeldb.ResolverTypeAnchor,
  2691  		ResolverOutcome: channeldb.ResolverOutcomeClaimed,
  2692  		SpendTxID:       &spendTx,
  2693  	}
  2694  
  2695  	assertResolverReport(t, reports, expectedReport)
  2696  
  2697  	// We expect two anchor inputs, the local and the remote to be swept.
  2698  	// Thus we should expect there are two deadlines used, both are equal
  2699  	// to htlcWithPreimage's CLTV minus current block height.
  2700  	require.Equal(t, 2, len(chanArbCtx.sweeper.deadlines))
  2701  	require.EqualValues(t,
  2702  		htlcWithPreimage.RefundTimeout-heightHint,
  2703  		chanArbCtx.sweeper.deadlines[0],
  2704  	)
  2705  	require.EqualValues(t,
  2706  		htlcWithPreimage.RefundTimeout-heightHint,
  2707  		chanArbCtx.sweeper.deadlines[1],
  2708  	)
  2709  
  2710  }
  2711  
  2712  // putResolverReportInChannel returns a put report function which will pipe
  2713  // reports into the channel provided.
  2714  func putResolverReportInChannel(reports chan *channeldb.ResolverReport) func(
  2715  	_ kvdb.RwTx, report *channeldb.ResolverReport) error {
  2716  
  2717  	return func(_ kvdb.RwTx, report *channeldb.ResolverReport) error {
  2718  		reports <- report
  2719  		return nil
  2720  	}
  2721  }
  2722  
  2723  // assertResolverReport checks that  a set of reports only contains a single
  2724  // report, and that it is equal to the expected report passed in.
  2725  func assertResolverReport(t *testing.T, reports chan *channeldb.ResolverReport,
  2726  	expected *channeldb.ResolverReport) {
  2727  
  2728  	select {
  2729  	case report := <-reports:
  2730  		if !reflect.DeepEqual(report, expected) {
  2731  			t.Fatalf("expected: %v, got: %v", expected, report)
  2732  		}
  2733  
  2734  	case <-time.After(defaultTimeout):
  2735  		t.Fatalf("no reports present")
  2736  	}
  2737  }
  2738  
// mockChannel is a lightweight channel mock used by the arbitrator tests.
// It serves canned anchor resolutions and a minimal force close summary.
type mockChannel struct {
	// anchorResolutions, when non-nil, is returned verbatim by
	// NewAnchorResolutions; otherwise an empty set is returned.
	anchorResolutions *lnwallet.AnchorResolutions
}
  2742  
  2743  func (m *mockChannel) NewAnchorResolutions() (*lnwallet.AnchorResolutions,
  2744  	error) {
  2745  	if m.anchorResolutions != nil {
  2746  		return m.anchorResolutions, nil
  2747  	}
  2748  
  2749  	return &lnwallet.AnchorResolutions{}, nil
  2750  }
  2751  
  2752  func (m *mockChannel) ForceCloseChan() (*lnwallet.LocalForceCloseSummary, error) {
  2753  	summary := &lnwallet.LocalForceCloseSummary{
  2754  		CloseTx:         &wire.MsgTx{},
  2755  		HtlcResolutions: &lnwallet.HtlcResolutions{},
  2756  	}
  2757  	return summary, nil
  2758  }