github.com/decred/dcrlnd@v0.7.6/contractcourt/chain_watcher_test.go (about)

     1  package contractcourt
     2  
     3  import (
     4  	"bytes"
     5  	"crypto/sha256"
     6  	"fmt"
     7  	"testing"
     8  	"time"
     9  
    10  	"github.com/decred/dcrd/wire"
    11  	"github.com/decred/dcrlnd/chainntnfs"
    12  	"github.com/decred/dcrlnd/chainscan"
    13  	"github.com/decred/dcrlnd/channeldb"
    14  	"github.com/decred/dcrlnd/input"
    15  	"github.com/decred/dcrlnd/lntest/mock"
    16  	"github.com/decred/dcrlnd/lnwallet"
    17  	"github.com/decred/dcrlnd/lnwire"
    18  )
    19  
    20  // TestChainWatcherRemoteUnilateralClose tests that the chain watcher is able
    21  // to properly detect a normal unilateral close by the remote node using their
    22  // lowest commitment.
    23  func TestChainWatcherRemoteUnilateralClose(t *testing.T) {
    24  	t.Parallel()
    25  
    26  	// First, we'll create two channels which already have established a
    27  	// commitment contract between themselves.
    28  	aliceChannel, bobChannel, cleanUp, err := lnwallet.CreateTestChannels(
    29  		channeldb.SingleFunderTweaklessBit,
    30  	)
    31  	if err != nil {
    32  		t.Fatalf("unable to create test channels: %v", err)
    33  	}
    34  	defer cleanUp()
    35  
    36  	// With the channels created, we'll now create a chain watcher instance
    37  	// which will be watching for any closes of Alice's channel.
    38  	aliceNotifier := &mock.ChainNotifier{
    39  		SpendChan: make(chan *chainntnfs.SpendDetail),
    40  		EpochChan: make(chan *chainntnfs.BlockEpoch),
    41  		ConfChan:  make(chan *chainntnfs.TxConfirmation),
    42  	}
    43  	aliceChainWatcher, err := newChainWatcher(chainWatcherConfig{
    44  		chanState:           aliceChannel.State(),
    45  		notifier:            aliceNotifier,
    46  		signer:              aliceChannel.Signer,
    47  		extractStateNumHint: lnwallet.GetStateNumHint,
    48  	})
    49  	if err != nil {
    50  		t.Fatalf("unable to create chain watcher: %v", err)
    51  	}
    52  	err = aliceChainWatcher.Start()
    53  	if err != nil {
    54  		t.Fatalf("unable to start chain watcher: %v", err)
    55  	}
    56  	defer aliceChainWatcher.Stop()
    57  
    58  	// We'll request a new channel event subscription from Alice's chain
    59  	// watcher.
    60  	chanEvents := aliceChainWatcher.SubscribeChannelEvents()
    61  
    62  	// If we simulate an immediate broadcast of the current commitment by
    63  	// Bob, then the chain watcher should detect this case.
    64  	bobCommit := bobChannel.State().LocalCommitment.CommitTx
    65  	bobTxHash := bobCommit.TxHash()
    66  	bobSpend := &chainntnfs.SpendDetail{
    67  		SpenderTxHash: &bobTxHash,
    68  		SpendingTx:    bobCommit,
    69  	}
    70  	aliceNotifier.SpendChan <- bobSpend
    71  
    72  	// We should get a new spend event over the remote unilateral close
    73  	// event channel.
    74  	var uniClose *RemoteUnilateralCloseInfo
    75  	select {
    76  	case uniClose = <-chanEvents.RemoteUnilateralClosure:
    77  	case <-time.After(time.Second * 15):
    78  		t.Fatalf("didn't receive unilateral close event")
    79  	}
    80  
    81  	// The unilateral close should have properly located Alice's output in
    82  	// the commitment transaction.
    83  	if uniClose.CommitResolution == nil {
    84  		t.Fatalf("unable to find alice's commit resolution")
    85  	}
    86  }
    87  
    88  func addFakeHTLC(t *testing.T, htlcAmount lnwire.MilliAtom, id uint64,
    89  	aliceChannel, bobChannel *lnwallet.LightningChannel) {
    90  
    91  	preimage := bytes.Repeat([]byte{byte(id)}, 32)
    92  	paymentHash := sha256.Sum256(preimage)
    93  	var returnPreimage [32]byte
    94  	copy(returnPreimage[:], preimage)
    95  	htlc := &lnwire.UpdateAddHTLC{
    96  		ID:          id,
    97  		PaymentHash: paymentHash,
    98  		Amount:      htlcAmount,
    99  		Expiry:      uint32(5),
   100  	}
   101  
   102  	if _, err := aliceChannel.AddHTLC(htlc, nil); err != nil {
   103  		t.Fatalf("alice unable to add htlc: %v", err)
   104  	}
   105  	if _, err := bobChannel.ReceiveHTLC(htlc); err != nil {
   106  		t.Fatalf("bob unable to recv add htlc: %v", err)
   107  	}
   108  }
   109  
   110  // TestChainWatcherRemoteUnilateralClosePendingCommit tests that the chain
   111  // watcher is able to properly detect a unilateral close wherein the remote
   112  // node broadcasts their newly received commitment, without first revoking the
   113  // old one.
   114  func TestChainWatcherRemoteUnilateralClosePendingCommit(t *testing.T) {
   115  	t.Parallel()
   116  
   117  	// First, we'll create two channels which already have established a
   118  	// commitment contract between themselves.
   119  	aliceChannel, bobChannel, cleanUp, err := lnwallet.CreateTestChannels(
   120  		channeldb.SingleFunderTweaklessBit,
   121  	)
   122  	if err != nil {
   123  		t.Fatalf("unable to create test channels: %v", err)
   124  	}
   125  	defer cleanUp()
   126  
   127  	// With the channels created, we'll now create a chain watcher instance
   128  	// which will be watching for any closes of Alice's channel.
   129  	aliceNotifier := &mock.ChainNotifier{
   130  		SpendChan: make(chan *chainntnfs.SpendDetail),
   131  		EpochChan: make(chan *chainntnfs.BlockEpoch),
   132  		ConfChan:  make(chan *chainntnfs.TxConfirmation),
   133  	}
   134  	aliceChainWatcher, err := newChainWatcher(chainWatcherConfig{
   135  		chanState:           aliceChannel.State(),
   136  		notifier:            aliceNotifier,
   137  		signer:              aliceChannel.Signer,
   138  		extractStateNumHint: lnwallet.GetStateNumHint,
   139  	})
   140  	if err != nil {
   141  		t.Fatalf("unable to create chain watcher: %v", err)
   142  	}
   143  	if err := aliceChainWatcher.Start(); err != nil {
   144  		t.Fatalf("unable to start chain watcher: %v", err)
   145  	}
   146  	defer aliceChainWatcher.Stop()
   147  
   148  	// We'll request a new channel event subscription from Alice's chain
   149  	// watcher.
   150  	chanEvents := aliceChainWatcher.SubscribeChannelEvents()
   151  
   152  	// Next, we'll create a fake HTLC just so we can advance Alice's
   153  	// channel state to a new pending commitment on her remote commit chain
   154  	// for Bob.
   155  	htlcAmount := lnwire.NewMAtomsFromAtoms(20000)
   156  	addFakeHTLC(t, htlcAmount, 0, aliceChannel, bobChannel)
   157  
   158  	// With the HTLC added, we'll now manually initiate a state transition
   159  	// from Alice to Bob.
   160  	_, _, _, err = aliceChannel.SignNextCommitment()
   161  	if err != nil {
   162  		t.Fatal(err)
   163  	}
   164  
   165  	// At this point, we'll now Bob broadcasting this new pending unrevoked
   166  	// commitment.
   167  	bobPendingCommit, err := aliceChannel.State().RemoteCommitChainTip()
   168  	if err != nil {
   169  		t.Fatal(err)
   170  	}
   171  
   172  	// We'll craft a fake spend notification with Bob's actual commitment.
   173  	// The chain watcher should be able to detect that this is a pending
   174  	// commit broadcast based on the state hints in the commitment.
   175  	bobCommit := bobPendingCommit.Commitment.CommitTx
   176  	bobTxHash := bobCommit.TxHash()
   177  	bobSpend := &chainntnfs.SpendDetail{
   178  		SpenderTxHash: &bobTxHash,
   179  		SpendingTx:    bobCommit,
   180  	}
   181  	aliceNotifier.SpendChan <- bobSpend
   182  
   183  	// We should get a new spend event over the remote unilateral close
   184  	// event channel.
   185  	var uniClose *RemoteUnilateralCloseInfo
   186  	select {
   187  	case uniClose = <-chanEvents.RemoteUnilateralClosure:
   188  	case <-time.After(time.Second * 15):
   189  		t.Fatalf("didn't receive unilateral close event")
   190  	}
   191  
   192  	// The unilateral close should have properly located Alice's output in
   193  	// the commitment transaction.
   194  	if uniClose.CommitResolution == nil {
   195  		t.Fatalf("unable to find alice's commit resolution")
   196  	}
   197  }
   198  
   199  // TestChainWatcherCorrectSpendNtn tests whether the chainWatcher is deriving
   200  // the correct info for watching the chain for a given channel.
   201  func TestChainWatcherCorrectSpendNtnf(t *testing.T) {
   202  	t.Parallel()
   203  
   204  	// First, we'll create two channels which already have established a
   205  	// commitment contract between themselves.
   206  	aliceChannel, _, cleanUp, err := lnwallet.CreateTestChannels(
   207  		channeldb.SingleFunderBit,
   208  	)
   209  	if err != nil {
   210  		t.Fatalf("unable to create test channels: %v", err)
   211  	}
   212  	defer cleanUp()
   213  
   214  	// With the channels created, we'll now create a chain watcher instance
   215  	// which will be watching for any closes of Alice's channel.
   216  	aliceNotifier := &mock.ChainNotifier{
   217  		SpendChan: make(chan *chainntnfs.SpendDetail),
   218  	}
   219  	aliceChainWatcher, err := newChainWatcher(chainWatcherConfig{
   220  		chanState: aliceChannel.State(),
   221  		notifier:  aliceNotifier,
   222  		signer:    aliceChannel.Signer,
   223  	})
   224  	if err != nil {
   225  		t.Fatalf("unable to create chain watcher: %v", err)
   226  	}
   227  	if err := aliceChainWatcher.Start(); err != nil {
   228  		t.Fatalf("unable to start chain watcher: %v", err)
   229  	}
   230  	defer aliceChainWatcher.Stop()
   231  
   232  	// The mock chain notifier should have registered a watch for the given
   233  	// channel.
   234  	registeredSpends := aliceNotifier.RegisteredSpendNtfns()
   235  	if len(registeredSpends) != 1 {
   236  		t.Fatalf("expected 1 spend notification watchers by found %d",
   237  			len(registeredSpends))
   238  	}
   239  
   240  	aliceChanPoint := aliceChannel.ChanPoint
   241  	ntnfReq := registeredSpends[0]
   242  	if ntnfReq.Outpoint != *aliceChanPoint {
   243  		t.Fatalf("expected spend ntnf to be watching channel outpoint "+
   244  			"%s, instead watching %s", aliceChanPoint,
   245  			ntnfReq.Outpoint)
   246  	}
   247  
   248  	_, err = chainscan.ParsePkScript(0, ntnfReq.PkScript)
   249  	if err != nil {
   250  		t.Fatalf("unable to parse watched pkscript: %v", err)
   251  	}
   252  }
   253  
// dlpTestCase describes a single scenario for the main
// TestChainWatcherDataLossProtect test: NumUpdates is the number of state
// updates Alice is assumed to know about, while BroadcastStateNum is the
// (higher) state number the remote party is simulated to have broadcast.
//
// NOTE(review): an earlier comment referenced a special testing/quick
// Generate method on this type; no such method is visible in this file — the
// test now uses a hand-written table of cases. Confirm before relying on
// quick.Check with this type.
type dlpTestCase struct {
	BroadcastStateNum uint8
	NumUpdates        uint8
}
   262  
   263  // executeStateTransitions execute the given number of state transitions.
   264  // Copies of Alice's channel state before each transition (including initial
   265  // state) are returned.
   266  func executeStateTransitions(t *testing.T, htlcAmount lnwire.MilliAtom,
   267  	aliceChannel, bobChannel *lnwallet.LightningChannel,
   268  	numUpdates uint8) ([]*channeldb.OpenChannel, func(), error) {
   269  
   270  	// We'll make a copy of the channel state before each transition.
   271  	var (
   272  		chanStates   []*channeldb.OpenChannel
   273  		cleanupFuncs []func()
   274  	)
   275  
   276  	cleanAll := func() {
   277  		for _, f := range cleanupFuncs {
   278  			f()
   279  		}
   280  	}
   281  
   282  	state, f, err := copyChannelState(aliceChannel.State())
   283  	if err != nil {
   284  		return nil, nil, err
   285  	}
   286  
   287  	chanStates = append(chanStates, state)
   288  	cleanupFuncs = append(cleanupFuncs, f)
   289  
   290  	for i := 0; i < int(numUpdates); i++ {
   291  		addFakeHTLC(
   292  			t, htlcAmount, uint64(i), aliceChannel, bobChannel,
   293  		)
   294  
   295  		err := lnwallet.ForceStateTransition(aliceChannel, bobChannel)
   296  		if err != nil {
   297  			cleanAll()
   298  			return nil, nil, err
   299  		}
   300  
   301  		state, f, err := copyChannelState(aliceChannel.State())
   302  		if err != nil {
   303  			cleanAll()
   304  			return nil, nil, err
   305  		}
   306  
   307  		chanStates = append(chanStates, state)
   308  		cleanupFuncs = append(cleanupFuncs, f)
   309  	}
   310  
   311  	return chanStates, cleanAll, nil
   312  }
   313  
// TestChainWatcherDataLossProtect tests that if we've lost data (and are
// behind the remote node), then we'll properly detect this case and dispatch a
// remote force close using the obtained data loss commitment point.
func TestChainWatcherDataLossProtect(t *testing.T) {
	t.Parallel()

	// dlpScenario is our primary quick check testing function for this
	// test as whole. It ensures that if the remote party broadcasts a
	// commitment that is beyond our best known commitment for them, and
	// they don't have a pending commitment (one we sent but which hasn't
	// been revoked), then we'll properly detect this case, and execute the
	// DLP protocol on our end.
	//
	// broadcastStateNum is the number that we'll trick Alice into thinking
	// was broadcast, while numUpdates is the actual number of updates
	// we'll execute. NOTE(review): a previous comment claimed these were
	// random 8-bit values from testing/quick; the table at the bottom of
	// this test now enumerates them explicitly.
	dlpScenario := func(t *testing.T, testCase dlpTestCase) bool {
		// First, we'll create two channels which already have
		// established a commitment contract between themselves.
		aliceChannel, bobChannel, cleanUp, err := lnwallet.CreateTestChannels(
			channeldb.SingleFunderBit,
		)
		if err != nil {
			t.Fatalf("unable to create test channels: %v", err)
		}
		defer cleanUp()

		// Based on the number of random updates for this state, make a
		// new HTLC to add to the commitment, and then lock in a state
		// transition. We advance all the way to BroadcastStateNum so
		// Bob's commitment actually exists at that height.
		const htlcAmt = 1000
		states, cleanStates, err := executeStateTransitions(
			t, htlcAmt, aliceChannel, bobChannel,
			testCase.BroadcastStateNum,
		)
		if err != nil {
			t.Errorf("unable to trigger state "+
				"transition: %v", err)
			return false
		}
		defer cleanStates()

		// We'll use the state this test case wants Alice to start at:
		// an earlier snapshot than the state Bob will broadcast, which
		// is what makes this a data-loss scenario.
		aliceChanState := states[testCase.NumUpdates]

		// With the channels created, we'll now create a chain watcher
		// instance which will be watching for any closes of Alice's
		// channel.
		aliceNotifier := &mock.ChainNotifier{
			SpendChan: make(chan *chainntnfs.SpendDetail),
			EpochChan: make(chan *chainntnfs.BlockEpoch),
			ConfChan:  make(chan *chainntnfs.TxConfirmation),
		}
		aliceChainWatcher, err := newChainWatcher(chainWatcherConfig{
			chanState: aliceChanState,
			notifier:  aliceNotifier,
			signer:    aliceChannel.Signer,
			extractStateNumHint: func(*wire.MsgTx,
				[lnwallet.StateHintSize]byte) uint64 {

				// We'll return the "fake" broadcast commitment
				// number so we can simulate broadcast of an
				// arbitrary state.
				return uint64(testCase.BroadcastStateNum)
			},
		})
		if err != nil {
			t.Fatalf("unable to create chain watcher: %v", err)
		}
		if err := aliceChainWatcher.Start(); err != nil {
			t.Fatalf("unable to start chain watcher: %v", err)
		}
		defer aliceChainWatcher.Stop()

		// We'll request a new channel event subscription from Alice's
		// chain watcher so we can be notified of our fake close below.
		chanEvents := aliceChainWatcher.SubscribeChannelEvents()

		// Otherwise, we'll feed in this new state number as a response
		// to the query, and insert the expected DLP commit point. This
		// must happen before the spend is injected so the watcher can
		// find the point when it processes the close.
		dlpPoint := aliceChannel.State().RemoteCurrentRevocation
		err = aliceChanState.MarkDataLoss(dlpPoint)
		if err != nil {
			t.Errorf("unable to insert dlp point: %v", err)
			return false
		}

		// Now we'll trigger the channel close event to trigger the
		// scenario.
		bobCommit := bobChannel.State().LocalCommitment.CommitTx
		bobTxHash := bobCommit.TxHash()
		bobSpend := &chainntnfs.SpendDetail{
			SpenderTxHash: &bobTxHash,
			SpendingTx:    bobCommit,
		}
		aliceNotifier.SpendChan <- bobSpend

		// We should get a new uni close resolution that indicates we
		// processed the DLP scenario.
		var uniClose *RemoteUnilateralCloseInfo
		select {
		case uniClose = <-chanEvents.RemoteUnilateralClosure:
			// If we processed this as a DLP case, then the remote
			// party's commitment should be blank, as we don't have
			// this up to date state.
			blankCommit := channeldb.ChannelCommitment{}
			if uniClose.RemoteCommit.FeePerKB != blankCommit.FeePerKB {
				t.Errorf("DLP path not executed")
				return false
			}

			// The resolution should have also read the DLP point
			// we stored above, and used that to derive their sweep
			// key for this output.
			sweepTweak := input.SingleTweakBytes(
				dlpPoint,
				aliceChannel.State().LocalChanCfg.PaymentBasePoint.PubKey,
			)
			commitResolution := uniClose.CommitResolution
			resolutionTweak := commitResolution.SelfOutputSignDesc.SingleTweak
			if !bytes.Equal(sweepTweak, resolutionTweak) {
				t.Errorf("sweep key mismatch: expected %x got %x",
					sweepTweak, resolutionTweak)
				return false
			}

			return true

		case <-time.After(time.Second * 5):
			t.Errorf("didn't receive unilateral close event")
			return false
		}
	}

	testCases := []dlpTestCase{
		// For our first scenario, we'll ensure that if we're on state 1,
		// and the remote party broadcasts state 2 and we don't have a
		// pending commit for them, then we'll properly detect this as a
		// DLP scenario.
		{
			BroadcastStateNum: 2,
			NumUpdates:        1,
		},

		// We've completed a single update, but the remote party broadcasts
		// a state that's 5 states beyond our best known state. We've lost
		// data, but only partially, so we should enter a DLP scenario.
		{
			BroadcastStateNum: 6,
			NumUpdates:        1,
		},

		// Similar to the case above, but we've done more than one
		// update.
		{
			BroadcastStateNum: 6,
			NumUpdates:        3,
		},

		// We've done zero updates, but our channel peer broadcasts a
		// state beyond our knowledge.
		{
			BroadcastStateNum: 10,
			NumUpdates:        0,
		},
	}
	for _, testCase := range testCases {
		testName := fmt.Sprintf("num_updates=%v,broadcast_state_num=%v",
			testCase.NumUpdates, testCase.BroadcastStateNum)

		// Capture the range variable so each parallel subtest sees its
		// own copy (required before Go 1.22).
		testCase := testCase
		t.Run(testName, func(t *testing.T) {
			t.Parallel()

			if !dlpScenario(t, testCase) {
				t.Fatalf("test %v failed", testName)
			}
		})
	}
}
   495  
   496  // TestChainWatcherLocalForceCloseDetect tests we're able to always detect our
   497  // commitment output based on only the outputs present on the transaction.
   498  func TestChainWatcherLocalForceCloseDetect(t *testing.T) {
   499  	t.Parallel()
   500  
   501  	// localForceCloseScenario is the primary test we'll use to execute our
   502  	// table driven tests. We'll assert that for any number of state
   503  	// updates, and if the commitment transaction has our output or not,
   504  	// we're able to properly detect a local force close.
   505  	localForceCloseScenario := func(t *testing.T, numUpdates, localState uint8,
   506  		remoteOutputOnly, localOutputOnly bool) bool {
   507  
   508  		// First, we'll create two channels which already have
   509  		// established a commitment contract between themselves.
   510  		aliceChannel, bobChannel, cleanUp, err := lnwallet.CreateTestChannels(
   511  			channeldb.SingleFunderBit,
   512  		)
   513  		if err != nil {
   514  			t.Fatalf("unable to create test channels: %v", err)
   515  		}
   516  		defer cleanUp()
   517  
   518  		// We'll execute a number of state transitions based on the
   519  		// randomly selected number from testing/quick. We do this to
   520  		// get more coverage of various state hint encodings beyond 0
   521  		// and 1.
   522  		const htlcAmt = 1000
   523  		states, cleanStates, err := executeStateTransitions(
   524  			t, htlcAmt, aliceChannel, bobChannel, numUpdates,
   525  		)
   526  		if err != nil {
   527  			t.Errorf("unable to trigger state "+
   528  				"transition: %v", err)
   529  			return false
   530  		}
   531  		defer cleanStates()
   532  
   533  		// We'll use the state this test case wants Alice to start at.
   534  		aliceChanState := states[localState]
   535  
   536  		// With the channels created, we'll now create a chain watcher
   537  		// instance which will be watching for any closes of Alice's
   538  		// channel.
   539  		aliceNotifier := &mock.ChainNotifier{
   540  			SpendChan: make(chan *chainntnfs.SpendDetail),
   541  			EpochChan: make(chan *chainntnfs.BlockEpoch),
   542  			ConfChan:  make(chan *chainntnfs.TxConfirmation),
   543  		}
   544  		aliceChainWatcher, err := newChainWatcher(chainWatcherConfig{
   545  			chanState:           aliceChanState,
   546  			notifier:            aliceNotifier,
   547  			signer:              aliceChannel.Signer,
   548  			extractStateNumHint: lnwallet.GetStateNumHint,
   549  		})
   550  		if err != nil {
   551  			t.Fatalf("unable to create chain watcher: %v", err)
   552  		}
   553  		if err := aliceChainWatcher.Start(); err != nil {
   554  			t.Fatalf("unable to start chain watcher: %v", err)
   555  		}
   556  		defer aliceChainWatcher.Stop()
   557  
   558  		// We'll request a new channel event subscription from Alice's
   559  		// chain watcher so we can be notified of our fake close below.
   560  		chanEvents := aliceChainWatcher.SubscribeChannelEvents()
   561  
   562  		// Next, we'll obtain Alice's commitment transaction and
   563  		// trigger a force close. This should cause her to detect a
   564  		// local force close, and dispatch a local close event.
   565  		aliceCommit := aliceChannel.State().LocalCommitment.CommitTx
   566  
   567  		// Since this is Alice's commitment, her output is always first
   568  		// since she's the one creating the HTLCs (lower balance). In
   569  		// order to simulate the commitment only having the remote
   570  		// party's output, we'll remove Alice's output.
   571  		if remoteOutputOnly {
   572  			aliceCommit.TxOut = aliceCommit.TxOut[1:]
   573  		}
   574  		if localOutputOnly {
   575  			aliceCommit.TxOut = aliceCommit.TxOut[:1]
   576  		}
   577  
   578  		aliceTxHash := aliceCommit.TxHash()
   579  		aliceSpend := &chainntnfs.SpendDetail{
   580  			SpenderTxHash: &aliceTxHash,
   581  			SpendingTx:    aliceCommit,
   582  		}
   583  		aliceNotifier.SpendChan <- aliceSpend
   584  
   585  		// We should get a local force close event from Alice as she
   586  		// should be able to detect the close based on the commitment
   587  		// outputs.
   588  		select {
   589  		case summary := <-chanEvents.LocalUnilateralClosure:
   590  			// Make sure we correctly extracted the commit
   591  			// resolution if we had a local output.
   592  			if remoteOutputOnly {
   593  				if summary.CommitResolution != nil {
   594  					t.Fatalf("expected no commit resolution")
   595  				}
   596  			} else {
   597  				if summary.CommitResolution == nil {
   598  					t.Fatalf("expected commit resolution")
   599  				}
   600  			}
   601  
   602  			return true
   603  
   604  		case <-time.After(time.Second * 5):
   605  			t.Errorf("didn't get local for close for state #%v",
   606  				numUpdates)
   607  			return false
   608  		}
   609  	}
   610  
   611  	// For our test cases, we'll ensure that we test having a remote output
   612  	// present and absent with non or some number of updates in the channel.
   613  	testCases := []struct {
   614  		numUpdates       uint8
   615  		localState       uint8
   616  		remoteOutputOnly bool
   617  		localOutputOnly  bool
   618  	}{
   619  		{
   620  			numUpdates:       0,
   621  			localState:       0,
   622  			remoteOutputOnly: true,
   623  		},
   624  		{
   625  			numUpdates:       0,
   626  			localState:       0,
   627  			remoteOutputOnly: false,
   628  		},
   629  		{
   630  			numUpdates:      0,
   631  			localState:      0,
   632  			localOutputOnly: true,
   633  		},
   634  		{
   635  			numUpdates:       20,
   636  			localState:       20,
   637  			remoteOutputOnly: false,
   638  		},
   639  		{
   640  			numUpdates:       20,
   641  			localState:       20,
   642  			remoteOutputOnly: true,
   643  		},
   644  		{
   645  			numUpdates:      20,
   646  			localState:      20,
   647  			localOutputOnly: true,
   648  		},
   649  		{
   650  			numUpdates:       20,
   651  			localState:       5,
   652  			remoteOutputOnly: false,
   653  		},
   654  		{
   655  			numUpdates:       20,
   656  			localState:       5,
   657  			remoteOutputOnly: true,
   658  		},
   659  		{
   660  			numUpdates:      20,
   661  			localState:      5,
   662  			localOutputOnly: true,
   663  		},
   664  	}
   665  	for _, testCase := range testCases {
   666  		testName := fmt.Sprintf(
   667  			"num_updates=%v,remote_output=%v,local_output=%v",
   668  			testCase.numUpdates, testCase.remoteOutputOnly,
   669  			testCase.localOutputOnly,
   670  		)
   671  
   672  		testCase := testCase
   673  		t.Run(testName, func(t *testing.T) {
   674  			t.Parallel()
   675  
   676  			localForceCloseScenario(
   677  				t, testCase.numUpdates, testCase.localState,
   678  				testCase.remoteOutputOnly,
   679  				testCase.localOutputOnly,
   680  			)
   681  		})
   682  	}
   683  }