github.com/decred/dcrlnd@v0.7.6/lntest/itest/lnd_channel_force_close_test.go (about)

     1  package itest
     2  
     3  import (
     4  	"bytes"
     5  	"context"
     6  	"fmt"
     7  	"testing"
     8  
     9  	"github.com/decred/dcrd/dcrutil/v4"
    10  	"github.com/decred/dcrd/wire"
    11  	"github.com/decred/dcrlnd/chainreg"
    12  	"github.com/decred/dcrlnd/lnrpc"
    13  	"github.com/decred/dcrlnd/lnrpc/routerrpc"
    14  	"github.com/decred/dcrlnd/lntest"
    15  	"github.com/decred/dcrlnd/lntest/wait"
    16  	"github.com/decred/dcrlnd/lnwallet"
    17  	"github.com/decred/dcrlnd/lnwallet/chainfee"
    18  	"github.com/decred/dcrlnd/routing"
    19  	"github.com/go-errors/errors"
    20  	"github.com/stretchr/testify/require"
    21  )
    22  
     23  // testCommitmentTransactionDeadline tests that the anchor sweep transaction
     24  // takes into account the deadline of the commitment transaction. It tests two
    25  // scenarios:
    26  //  1. when the CPFP is skipped, checks that the deadline is not used.
    27  //  2. when the CPFP is used, checks that the deadline is applied.
    28  //
    29  // Note that whether the deadline is used or not is implicitly checked by its
    30  // corresponding fee rates.
    31  func testCommitmentTransactionDeadline(net *lntest.NetworkHarness,
    32  	t *harnessTest) {
    33  
    34  	// Get the default max fee rate used in sweeping the commitment
    35  	// transaction.
    36  	defaultMax := lnwallet.DefaultAnchorsCommitMaxFeeRateAtomsPerByte
    37  	maxPerKb := chainfee.AtomPerKByte(defaultMax * 1000)
    38  
    39  	const (
     40  		// feeRateDefault (atoms/kb) is used when no conf target is
     41  		// set. This value will be returned by the fee estimator but
     42  		// won't be used because our commitment fee rate is capped by
     43  		// DefaultAnchorsCommitMaxFeeRateAtomsPerByte.
    44  		feeRateDefault = 20000
    45  
    46  		// finalCTLV is used when Alice sends payment to Bob.
    47  		finalCTLV = 144
    48  
     49  		// deadline is used when Alice sweeps the anchor. Note that a
     50  		// block padding of 3 is added, such that the value of
     51  		// deadline is 147.
    52  		deadline = uint32(finalCTLV + routing.BlockPadding)
    53  	)
    54  
     55  	// feeRateSmall (atoms/kb) is used when we want to skip the CPFP
     56  	// on anchor transactions. When the fee rate is smaller than
     57  	// the parent's (commitment transaction) fee rate, the CPFP
     58  	// will be skipped. At the moment, the parent tx's fee rate is
     59  	// roughly 2500 atoms/kb in this test.
    60  	feeRateSmall := maxPerKb / 2
    61  
     62  	// feeRateLarge (atoms/kb) is used when we want to use the anchor
    63  	// transaction to CPFP our commitment transaction.
    64  	feeRateLarge := maxPerKb * 2
    65  
    66  	ctxb := context.Background()
    67  
     68  	// Before we start, set up the default fee rate; we will test the
    69  	// actual fee rate against it to decide whether we are using the
    70  	// deadline to perform fee estimation.
    71  	net.SetFeeEstimate(feeRateDefault)
    72  
     73  	// setupNode creates a new node and sends 1 DCR to the node.
    74  	setupNode := func(name string) *lntest.HarnessNode {
    75  		// Create the node.
    76  		args := []string{"--hodl.exit-settle"}
    77  		args = append(args, nodeArgsForCommitType(lnrpc.CommitmentType_ANCHORS)...)
    78  		node := net.NewNode(t.t, name, args)
    79  
    80  		// Send some coins to the node.
    81  		net.SendCoins(t.t, dcrutil.AtomsPerCoin, node)
    82  
    83  		// We need one additional UTXO to create the sweeping tx for
    84  		// the remote anchor.
    85  		net.SendCoins(t.t, dcrutil.AtomsPerCoin, node)
    86  		return node
    87  	}
    88  
    89  	// calculateSweepFeeRate runs multiple steps to calculate the fee rate
    90  	// used in sweeping the transactions.
    91  	calculateSweepFeeRate := func(expectedSweepTxNum int) int64 {
    92  		// Create two nodes, Alice and Bob.
    93  		alice := setupNode("Alice")
    94  		defer shutdownAndAssert(net, t, alice)
    95  
    96  		bob := setupNode("Bob")
    97  		defer shutdownAndAssert(net, t, bob)
    98  
    99  		// Connect Alice to Bob.
   100  		net.ConnectNodes(t.t, alice, bob)
   101  
   102  		// Open a channel between Alice and Bob.
   103  		chanPoint := openChannelAndAssert(
   104  			t, net, alice, bob, lntest.OpenChannelParams{
   105  				Amt:     10e6,
   106  				PushAmt: 5e6,
   107  			},
   108  		)
   109  
   110  		// Send a payment with a specified finalCTLVDelta, which will
   111  		// be used as our deadline later on when Alice force closes the
   112  		// channel.
   113  		ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
   114  		defer cancel()
   115  		_, err := alice.RouterClient.SendPaymentV2(
   116  			ctxt, &routerrpc.SendPaymentRequest{
   117  				Dest:           bob.PubKey[:],
   118  				Amt:            10e4,
   119  				PaymentHash:    makeFakePayHash(t),
   120  				FinalCltvDelta: finalCTLV,
   121  				TimeoutSeconds: 60,
   122  				FeeLimitMAtoms: noFeeLimitMAtoms,
   123  			},
   124  		)
   125  		require.NoError(t.t, err, "unable to send alice htlc")
   126  
   127  		// Once the HTLC has cleared, all the nodes in our mini network
   128  		// should show that the HTLC has been locked in.
   129  		nodes := []*lntest.HarnessNode{alice, bob}
   130  		err = wait.NoError(func() error {
   131  			return assertNumActiveHtlcs(nodes, 1)
   132  		}, defaultTimeout)
   133  		require.NoError(t.t, err, "htlc mismatch")
   134  
   135  		// Alice force closes the channel.
   136  		_, _, err = net.CloseChannel(alice, chanPoint, true)
   137  		require.NoError(t.t, err, "unable to force close channel")
   138  
   139  		// Now that the channel has been force closed, it should show
   140  		// up in the PendingChannels RPC under the waiting close
   141  		// section.
   142  		ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
   143  		defer cancel()
   144  		pendingChansRequest := &lnrpc.PendingChannelsRequest{}
   145  		pendingChanResp, err := alice.PendingChannels(
   146  			ctxt, pendingChansRequest,
   147  		)
   148  		require.NoError(
   149  			t.t, err, "unable to query for pending channels",
   150  		)
   151  		require.NoError(
   152  			t.t, checkNumWaitingCloseChannels(pendingChanResp, 1),
   153  		)
   154  
    155  		// Check that our sweep transactions can be found in the mempool.
   156  		sweepTxns, err := getNTxsFromMempool(
   157  			net.Miner.Node,
   158  			expectedSweepTxNum, minerMempoolTimeout,
   159  		)
   160  		require.NoError(
   161  			t.t, err, "failed to find commitment tx in mempool",
   162  		)
   163  
    164  		// Mine a block to confirm these transactions such that they
    165  		// don't remain in the mempool for any subsequent tests.
    166  		mineBlocks(t, net, 1, 0)
   168  
   169  		// Calculate the fee rate used.
   170  		feeRate := calculateTxnsFeeRate(t.t, net.Miner, sweepTxns)
   171  
   172  		return feeRate
   173  	}
   174  
    175  	// Set up our fee estimation for the deadline. Because the fee rate is
   176  	// smaller than the parent tx's fee rate, this value won't be used and
   177  	// we should see only one sweep tx in the mempool.
   178  	net.SetFeeEstimateWithConf(feeRateSmall, deadline)
   179  
   180  	// Calculate fee rate used.
   181  	feeRate := calculateSweepFeeRate(1)
   182  
    183  	// We expect the default max fee rate to be used. Allow some
    184  	// deviation, since size estimates during tx generation are inexact.
   185  	require.InEpsilonf(
   186  		t.t, int64(maxPerKb), feeRate, 0.02,
   187  		"expected fee rate:%d, got fee rate:%d", maxPerKb, feeRate,
   188  	)
   189  
    190  	// Set up our fee estimation for the deadline. Because the fee rate is
   191  	// greater than the parent tx's fee rate, this value will be used to
   192  	// sweep the anchor transaction and we should see two sweep
   193  	// transactions in the mempool.
   194  	net.SetFeeEstimateWithConf(feeRateLarge, deadline)
   195  
   196  	// Calculate fee rate used.
   197  	feeRate = calculateSweepFeeRate(2)
   198  
   199  	// We expect the anchor to be swept with the deadline, which has the
   200  	// fee rate of feeRateLarge.
   201  	require.InEpsilonf(
   202  		t.t, int64(feeRateLarge), feeRate, 0.02,
   203  		"expected fee rate:%d, got fee rate:%d", feeRateLarge, feeRate,
   204  	)
   205  }
   206  
   207  // calculateTxnsFeeRate takes a list of transactions and estimates the fee rate
   208  // used to sweep them.
   209  func calculateTxnsFeeRate(t *testing.T,
   210  	miner *lntest.HarnessMiner, txns []*wire.MsgTx) int64 {
   211  
   212  	var totalSize, totalFee int64
   213  	for _, tx := range txns {
   214  		totalSize += int64(tx.SerializeSize())
   215  
   216  		fee, err := getTxFee(miner.Node, tx)
   217  		require.NoError(t, err)
   218  
   219  		totalFee += int64(fee)
   220  	}
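        	// The resulting rate is expressed in atoms/kB: totalFee is in
        	// atoms and totalSize in bytes, hence the scaling by 1000.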
   221  	feeRate := totalFee * 1000 / totalSize
   222  
   223  	return feeRate
   224  }
   225  
   226  // testChannelForceClosure performs a test to exercise the behavior of "force"
   227  // closing a channel or unilaterally broadcasting the latest local commitment
   228  // state on-chain. The test creates a new channel between Alice and Carol, then
   229  // force closes the channel after some cursory assertions. Within the test, a
   230  // total of 3 + n transactions will be broadcast, representing the commitment
   231  // transaction, a transaction sweeping the local CSV delayed output, a
   232  // transaction sweeping the CSV delayed 2nd-layer htlcs outputs, and n
   233  // htlc timeout transactions, where n is the number of payments Alice attempted
   234  // to send to Carol.  This test includes several restarts to ensure that the
   235  // transaction output states are persisted throughout the forced closure
   236  // process.
   237  //
   238  // TODO(roasbeef): also add an unsettled HTLC before force closing.
   239  func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) {
   240  	// We'll test the scenario for some of the commitment types, to ensure
   241  	// outputs can be swept.
   242  	commitTypes := []lnrpc.CommitmentType{
   243  		lnrpc.CommitmentType_LEGACY,
   244  		lnrpc.CommitmentType_ANCHORS,
   245  	}
   246  
   247  	for _, channelType := range commitTypes {
   248  		testName := fmt.Sprintf("committype=%v", channelType)
   249  
   250  		channelType := channelType
   251  		success := t.t.Run(testName, func(t *testing.T) {
   252  			ht := newHarnessTest(t, net)
   253  
   254  			args := nodeArgsForCommitType(channelType)
   255  			alice := net.NewNode(ht.t, "Alice", args)
   256  			defer shutdownAndAssert(net, ht, alice)
   257  
   258  			// Since we'd like to test failure scenarios with
   259  			// outstanding htlcs, we'll introduce another node into
   260  			// our test network: Carol.
   261  			carolArgs := []string{"--hodl.exit-settle"}
   262  			carolArgs = append(carolArgs, args...)
   263  			carol := net.NewNode(ht.t, "Carol", carolArgs)
   264  			defer shutdownAndAssert(net, ht, carol)
   265  
    266  			// Each time, we'll send Alice a new set of coins in
    267  			// order to fund the channel.
   268  			net.SendCoins(t, dcrutil.AtomsPerCoin, alice)
   269  
   270  			// Also give Carol some coins to allow her to sweep her
   271  			// anchor.
   272  			net.SendCoins(t, dcrutil.AtomsPerCoin, carol)
   273  
   274  			channelForceClosureTest(
   275  				net, ht, alice, carol, channelType,
   276  			)
   277  		})
   278  		if !success {
   279  			return
   280  		}
   281  	}
   282  }
   283  
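        // channelForceClosureTest exercises the force close flow for a channel
        // of the given commitment type, asserting that commitment, anchor and
        // htlc outputs are swept correctly across the several restarts below.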
   284  func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest,
   285  	alice, carol *lntest.HarnessNode, channelType lnrpc.CommitmentType) {
   286  
   287  	// See the comment on testMultiHopHtlcAggregation.
   288  	if channelType == lnrpc.CommitmentType_ANCHORS {
   289  		t.Skipf("HTLC aggregation cannot happen in dcrlnd")
   290  	}
   291  
   292  	ctxb := context.Background()
   293  
   294  	const (
   295  		chanAmt     = dcrutil.Amount(10e6)
   296  		pushAmt     = dcrutil.Amount(5e6)
   297  		paymentAmt  = 100000
   298  		numInvoices = 6
   299  	)
   300  
   301  	const commitFeeRate = 20000
   302  	net.SetFeeEstimate(commitFeeRate)
   303  
   304  	// TODO(roasbeef): should check default value in config here
   305  	// instead, or make delay a param
   306  	defaultCLTV := uint32(chainreg.DefaultDecredTimeLockDelta)
   307  
    308  	// We must let Alice have an open channel before she can send a node
    309  	// announcement, so we open a channel with Carol.
   310  	net.ConnectNodes(t.t, alice, carol)
   311  
   312  	// We need one additional UTXO for sweeping the remote anchor.
   313  	net.SendCoins(t.t, dcrutil.AtomsPerCoin, alice)
   314  
    315  	// Before we start, obtain Carol's current wallet balance; we'll
    316  	// check to ensure that at the end of the force closure by Alice,
    317  	// Carol recognizes her new on-chain output.
   318  	carolBalReq := &lnrpc.WalletBalanceRequest{}
   319  	ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
   320  	carolBalResp, err := carol.WalletBalance(ctxt, carolBalReq)
   321  	if err != nil {
   322  		t.Fatalf("unable to get carol's balance: %v", err)
   323  	}
   324  
   325  	carolStartingBalance := carolBalResp.ConfirmedBalance
   326  
   327  	chanPoint := openChannelAndAssert(
   328  		t, net, alice, carol,
   329  		lntest.OpenChannelParams{
   330  			Amt:     chanAmt,
   331  			PushAmt: pushAmt,
   332  		},
   333  	)
   334  
   335  	// Wait for Alice and Carol to receive the channel edge from the
   336  	// funding manager.
   337  	err = alice.WaitForNetworkChannelOpen(chanPoint)
   338  	if err != nil {
   339  		t.Fatalf("alice didn't see the alice->carol channel before "+
   340  			"timeout: %v", err)
   341  	}
   342  	err = carol.WaitForNetworkChannelOpen(chanPoint)
   343  	if err != nil {
   344  		t.Fatalf("alice didn't see the alice->carol channel before "+
   345  			"timeout: %v", err)
   346  	}
   347  
    348  	// Send payments from Alice to Carol. Since Carol is in hodl mode,
    349  	// the htlc outputs should be left unsettled, and should be swept by
    350  	// the utxo nursery.
   351  	carolPubKey := carol.PubKey[:]
   352  	for i := 0; i < numInvoices; i++ {
   353  		ctx, cancel := context.WithCancel(ctxb)
   354  		defer cancel()
   355  
   356  		_, err := alice.RouterClient.SendPaymentV2(
   357  			ctx,
   358  			&routerrpc.SendPaymentRequest{
   359  				Dest:           carolPubKey,
   360  				Amt:            int64(paymentAmt),
   361  				PaymentHash:    makeFakePayHash(t),
   362  				FinalCltvDelta: chainreg.DefaultDecredTimeLockDelta,
   363  				TimeoutSeconds: 60,
   364  				FeeLimitMAtoms: noFeeLimitMAtoms,
   365  			},
   366  		)
   367  		if err != nil {
   368  			t.Fatalf("unable to send alice htlc: %v", err)
   369  		}
   370  	}
   371  
    372  	// Once the HTLCs have cleared, all the nodes in our mini network
    373  	// should show that the HTLCs have been locked in.
   374  	nodes := []*lntest.HarnessNode{alice, carol}
   375  	var predErr error
   376  	err = wait.Predicate(func() bool {
   377  		predErr = assertNumActiveHtlcs(nodes, numInvoices)
   378  		return predErr == nil
   379  	}, defaultTimeout)
   380  	if err != nil {
   381  		t.Fatalf("htlc mismatch: %v", predErr)
   382  	}
   383  
   384  	// Fetch starting height of this test so we can compute the block
   385  	// heights we expect certain events to take place.
   386  	_, curHeight, err := net.Miner.Node.GetBestBlock(context.Background())
   387  	if err != nil {
   388  		t.Fatalf("unable to get best block height")
   389  	}
   390  
   391  	// Using the current height of the chain, derive the relevant heights
   392  	// for incubating two-stage htlcs.
   393  	var (
   394  		startHeight           = uint32(curHeight)
   395  		commCsvMaturityHeight = startHeight + 1 + defaultCSV
   396  		htlcExpiryHeight      = padCLTV(startHeight + defaultCLTV)
   397  		htlcCsvMaturityHeight = padCLTV(startHeight + defaultCLTV + 1 + defaultCSV)
   398  	)
   399  
   400  	// If we are dealing with an anchor channel type, the sweeper will
   401  	// sweep the HTLC second level output one block earlier (than the
   402  	// nursery that waits an additional block, and handles non-anchor
   403  	// channels). So we set a maturity height that is one less.
   404  	if channelType == lnrpc.CommitmentType_ANCHORS {
   405  		htlcCsvMaturityHeight = padCLTV(
   406  			startHeight + defaultCLTV + defaultCSV,
   407  		)
   408  	}
   409  
   410  	aliceChan, err := getChanInfo(alice)
   411  	if err != nil {
   412  		t.Fatalf("unable to get alice's channel info: %v", err)
   413  	}
   414  	if aliceChan.NumUpdates == 0 {
   415  		t.Fatalf("alice should see at least one update to her channel")
   416  	}
   417  
   418  	// Now that the channel is open and we have unsettled htlcs, immediately
   419  	// execute a force closure of the channel. This will also assert that
   420  	// the commitment transaction was immediately broadcast in order to
   421  	// fulfill the force closure request.
   422  	const actualFeeRate = 30000
   423  	net.SetFeeEstimate(actualFeeRate)
   424  
   425  	_, closingTxID, err := net.CloseChannel(alice, chanPoint, true)
   426  	if err != nil {
   427  		t.Fatalf("unable to execute force channel closure: %v", err)
   428  	}
   429  
   430  	// Now that the channel has been force closed, it should show up in the
   431  	// PendingChannels RPC under the waiting close section.
   432  	pendingChansRequest := &lnrpc.PendingChannelsRequest{}
   433  	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
   434  	pendingChanResp, err := alice.PendingChannels(ctxt, pendingChansRequest)
   435  	if err != nil {
   436  		t.Fatalf("unable to query for pending channels: %v", err)
   437  	}
   438  	err = checkNumWaitingCloseChannels(pendingChanResp, 1)
   439  	if err != nil {
   440  		t.Fatalf(err.Error())
   441  	}
   442  
   443  	// Compute the outpoint of the channel, which we will use repeatedly to
   444  	// locate the pending channel information in the rpc responses.
   445  	txid, err := lnrpc.GetChanPointFundingTxid(chanPoint)
   446  	if err != nil {
   447  		t.Fatalf("unable to get txid: %v", err)
   448  	}
   449  	op := wire.OutPoint{
   450  		Hash:  *txid,
   451  		Index: chanPoint.OutputIndex,
   452  	}
   453  
   454  	waitingClose, err := findWaitingCloseChannel(pendingChanResp, &op)
   455  	if err != nil {
   456  		t.Fatalf(err.Error())
   457  	}
   458  
   459  	// Immediately after force closing, all of the funds should be in limbo.
   460  	if waitingClose.LimboBalance == 0 {
   461  		t.Fatalf("all funds should still be in limbo")
   462  	}
   463  
   464  	// Create a map of outpoints to expected resolutions for alice and carol
   465  	// which we will add reports to as we sweep outputs.
   466  	var (
   467  		aliceReports = make(map[string]*lnrpc.Resolution)
   468  		carolReports = make(map[string]*lnrpc.Resolution)
   469  	)
   470  
   471  	// The several restarts in this test are intended to ensure that when a
   472  	// channel is force-closed, the UTXO nursery has persisted the state of
   473  	// the channel in the closure process and will recover the correct state
   474  	// when the system comes back on line. This restart tests state
   475  	// persistence at the beginning of the process, when the commitment
   476  	// transaction has been broadcast but not yet confirmed in a block.
   477  	if err := net.RestartNode(alice, nil); err != nil {
   478  		t.Fatalf("Node restart failed: %v", err)
   479  	}
   480  
   481  	// To give the neutrino backend some time to catch up with the chain, we
   482  	// wait here until we have enough UTXOs to actually sweep the local and
   483  	// remote anchor.
   484  	const expectedUtxos = 2
   485  	assertNumUTXOs(t.t, alice, expectedUtxos)
   486  
   487  	// Mine a block which should confirm the commitment transaction
   488  	// broadcast as a result of the force closure. If there are anchors, we
   489  	// also expect the anchor sweep tx to be in the mempool.
   490  	expectedTxes := 1
   491  	expectedFeeRate := commitFeeRate
   492  	if channelType == lnrpc.CommitmentType_ANCHORS {
   493  		expectedTxes = 2
   494  		expectedFeeRate = actualFeeRate
   495  	}
   496  
   497  	sweepTxns, err := getNTxsFromMempool(
   498  		net.Miner.Node, expectedTxes, minerMempoolTimeout,
   499  	)
   500  	require.NoError(t.t, err, "sweep txns in miner mempool")
   501  
   502  	// Verify fee rate of the commitment tx plus anchor if present.
   503  	var totalSize, totalFee int64
   504  	for _, tx := range sweepTxns {
   505  		totalSize += int64(tx.SerializeSize())
   506  
   507  		fee, err := getTxFee(net.Miner.Node, tx)
   508  		require.NoError(t.t, err)
   509  		totalFee += int64(fee)
   510  	}
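        	// As above, the rate is in atoms/kB: fees are in atoms and
        	// sizes in bytes, hence the scaling by 1000.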
   511  	feeRate := totalFee * 1000 / totalSize
   512  
    513  	// Allow some deviation, because size estimates made during tx
    514  	// generation are inexact.
   515  	//
   516  	// Note(decred): this is higher than the original because we overestimate
   517  	// by a higher margin and by size (not weight), therefore the margin for
   518  	// error is higher.
   519  	require.InEpsilon(t.t, expectedFeeRate, feeRate, 0.01)
   520  
   521  	// Find alice's commit sweep and anchor sweep (if present) in the
   522  	// mempool.
   523  	aliceCloseTx := waitingClose.Commitments.LocalTxid
   524  	_, aliceAnchor := findCommitAndAnchor(t, net, sweepTxns, aliceCloseTx)
   525  
   526  	// If we expect anchors, add alice's anchor to our expected set of
   527  	// reports.
   528  	if channelType == lnrpc.CommitmentType_ANCHORS {
   529  		aliceReports[aliceAnchor.OutPoint.String()] = &lnrpc.Resolution{
   530  			ResolutionType: lnrpc.ResolutionType_ANCHOR,
   531  			Outcome:        lnrpc.ResolutionOutcome_CLAIMED,
   532  			SweepTxid:      aliceAnchor.SweepTx.TxHash().String(),
   533  			Outpoint: &lnrpc.OutPoint{
   534  				TxidBytes:   aliceAnchor.OutPoint.Hash[:],
   535  				TxidStr:     aliceAnchor.OutPoint.Hash.String(),
   536  				OutputIndex: aliceAnchor.OutPoint.Index,
   537  			},
   538  			AmountAtoms: uint64(anchorSize),
   539  		}
   540  	}
   541  
   542  	if _, err := net.Generate(1); err != nil {
   543  		t.Fatalf("unable to generate block: %v", err)
   544  	}
   545  
   546  	// Now that the commitment has been confirmed, the channel should be
   547  	// marked as force closed.
   548  	err = wait.NoError(func() error {
   549  		ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
   550  		pendingChanResp, err := alice.PendingChannels(
   551  			ctxt, pendingChansRequest,
   552  		)
   553  		if err != nil {
   554  			return fmt.Errorf("unable to query for pending "+
   555  				"channels: %v", err)
   556  		}
   557  
   558  		err = checkNumForceClosedChannels(pendingChanResp, 1)
   559  		if err != nil {
   560  			return err
   561  		}
   562  
   563  		forceClose, err := findForceClosedChannel(pendingChanResp, &op)
   564  		if err != nil {
   565  			return err
   566  		}
   567  
   568  		// Now that the channel has been force closed, it should now
   569  		// have the height and number of blocks to confirm populated.
   570  		err = checkCommitmentMaturity(
   571  			forceClose, commCsvMaturityHeight, int32(defaultCSV),
   572  		)
   573  		if err != nil {
   574  			return err
   575  		}
   576  
   577  		// None of our outputs have been swept, so they should all be in
   578  		// limbo. For anchors, we expect the anchor amount to be
   579  		// recovered.
   580  		if forceClose.LimboBalance == 0 {
   581  			return errors.New("all funds should still be in " +
   582  				"limbo")
   583  		}
   584  		expectedRecoveredBalance := int64(0)
   585  		if channelType == lnrpc.CommitmentType_ANCHORS {
   586  			expectedRecoveredBalance = anchorSize
   587  		}
   588  		if forceClose.RecoveredBalance != expectedRecoveredBalance {
   589  			return errors.New("no funds should yet be shown " +
   590  				"as recovered")
   591  		}
   592  
   593  		return nil
   594  	}, defaultTimeout)
   595  	if err != nil {
    596  		t.Fatalf(err.Error())
   597  	}
   598  
   599  	// The following restart is intended to ensure that outputs from the
   600  	// force close commitment transaction have been persisted once the
   601  	// transaction has been confirmed, but before the outputs are spendable
   602  	// (the "kindergarten" bucket.)
   603  	if err := net.RestartNode(alice, nil); err != nil {
   604  		t.Fatalf("Node restart failed: %v", err)
   605  	}
   606  
   607  	if channelType == lnrpc.CommitmentType_ANCHORS {
   608  		expectedTxes = 2
   609  	}
   610  
   611  	// Carol's sweep tx should be in the mempool already, as her output is
   612  	// not timelocked. If there are anchors, we also expect Carol's anchor
   613  	// sweep now.
   614  	sweepTxns, err = getNTxsFromMempool(
   615  		net.Miner.Node, expectedTxes, minerMempoolTimeout,
   616  	)
   617  	if err != nil {
   618  		t.Fatalf("failed to find Carol's sweep in miner mempool: %v",
   619  			err)
   620  	}
   621  
   622  	// Calculate the total fee Carol paid.
   623  	var totalFeeCarol dcrutil.Amount
   624  	for _, tx := range sweepTxns {
   625  		fee, err := getTxFee(net.Miner.Node, tx)
   626  		require.NoError(t.t, err)
   627  
   628  		totalFeeCarol += fee
   629  	}
   630  
    631  	// We look up the sweep txns we have found in the mempool and create
   632  	// expected resolutions for carol.
   633  	carolCommit, carolAnchor := findCommitAndAnchor(
   634  		t, net, sweepTxns, aliceCloseTx,
   635  	)
   636  
   637  	// If we have anchors, add an anchor resolution for carol.
   638  	if channelType == lnrpc.CommitmentType_ANCHORS {
   639  		carolReports[carolAnchor.OutPoint.String()] = &lnrpc.Resolution{
   640  			ResolutionType: lnrpc.ResolutionType_ANCHOR,
   641  			Outcome:        lnrpc.ResolutionOutcome_CLAIMED,
   642  			SweepTxid:      carolAnchor.SweepTx.TxHash().String(),
   643  			AmountAtoms:    anchorSize,
   644  			Outpoint: &lnrpc.OutPoint{
   645  				TxidBytes:   carolAnchor.OutPoint.Hash[:],
   646  				TxidStr:     carolAnchor.OutPoint.Hash.String(),
   647  				OutputIndex: carolAnchor.OutPoint.Index,
   648  			},
   649  		}
   650  	}
   651  
   652  	// Currently within the codebase, the default CSV is 4 relative blocks.
   653  	// For the persistence test, we generate two blocks, then trigger
   654  	// a restart and then generate the final block that should trigger
   655  	// the creation of the sweep transaction.
   656  	if _, err := net.Generate(defaultCSV - 2); err != nil {
   657  		t.Fatalf("unable to mine blocks: %v", err)
   658  	}
   659  
   660  	// The following restart checks to ensure that outputs in the
   661  	// kindergarten bucket are persisted while waiting for the required
   662  	// number of confirmations to be reported.
   663  	if err := net.RestartNode(alice, nil); err != nil {
   664  		t.Fatalf("Node restart failed: %v", err)
   665  	}
   666  
   667  	// Alice should see the channel in her set of pending force closed
   668  	// channels with her funds still in limbo.
   669  	var aliceBalance int64
   670  	err = wait.NoError(func() error {
   671  		ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
   672  		pendingChanResp, err := alice.PendingChannels(
   673  			ctxt, pendingChansRequest,
   674  		)
   675  		if err != nil {
   676  			return fmt.Errorf("unable to query for pending "+
   677  				"channels: %v", err)
   678  		}
   679  
   680  		err = checkNumForceClosedChannels(pendingChanResp, 1)
   681  		if err != nil {
   682  			return err
   683  		}
   684  
   685  		forceClose, err := findForceClosedChannel(
   686  			pendingChanResp, &op,
   687  		)
   688  		if err != nil {
   689  			return err
   690  		}
   691  
    692  		// Make a record of the balance we expect for alice.
   693  		aliceBalance = forceClose.Channel.LocalBalance
   694  
   695  		// At this point, the nursery should show that the commitment
    696  		// output has 2 blocks left before its CSV delay expires. In
   697  		// total, we have mined exactly defaultCSV blocks, so the htlc
   698  		// outputs should also reflect that this many blocks have
   699  		// passed.
   700  		err = checkCommitmentMaturity(
   701  			forceClose, commCsvMaturityHeight, 2,
   702  		)
   703  		if err != nil {
   704  			return err
   705  		}
   706  
   707  		// All funds should still be shown in limbo.
   708  		if forceClose.LimboBalance == 0 {
   709  			return errors.New("all funds should still be in " +
   710  				"limbo")
   711  		}
   712  		expectedRecoveredBalance := int64(0)
   713  		if channelType == lnrpc.CommitmentType_ANCHORS {
   714  			expectedRecoveredBalance = anchorSize
   715  		}
   716  		if forceClose.RecoveredBalance != expectedRecoveredBalance {
   717  			return errors.New("no funds should yet be shown " +
   718  				"as recovered")
   719  		}
   720  
   721  		return nil
   722  	}, defaultTimeout)
   723  	if err != nil {
   724  		t.Fatalf(err.Error())
   725  	}
   726  
   727  	// Generate an additional block, which should cause the CSV delayed
   728  	// output from the commitment txn to expire.
   729  	if _, err := net.Generate(1); err != nil {
   730  		t.Fatalf("unable to mine blocks: %v", err)
   731  	}
   732  
   733  	// At this point, the CSV will expire in the next block, meaning that
   734  	// the sweeping transaction should now be broadcast. So we fetch the
   735  	// node's mempool to ensure it has been properly broadcast.
   736  	sweepingTXID, err := waitForTxInMempool(
   737  		net.Miner.Node, minerMempoolTimeout,
   738  	)
   739  	if err != nil {
   740  		t.Fatalf("failed to get sweep tx from mempool: %v", err)
   741  	}
   742  
    743  	// Fetch the sweep transaction; all of its inputs should spend from
    744  	// the commitment transaction which was broadcast on-chain.
   745  	sweepTx, err := net.Miner.Node.GetRawTransaction(context.Background(), sweepingTXID)
   746  	if err != nil {
   747  		t.Fatalf("unable to fetch sweep tx: %v", err)
   748  	}
   749  	for _, txIn := range sweepTx.MsgTx().TxIn {
   750  		if !closingTxID.IsEqual(&txIn.PreviousOutPoint.Hash) {
   751  			t.Fatalf("sweep transaction not spending from commit "+
   752  				"tx %v, instead spending %v",
   753  				closingTxID, txIn.PreviousOutPoint)
   754  		}
   755  	}
   756  
   757  	// We expect a resolution which spends our commit output.
   758  	output := sweepTx.MsgTx().TxIn[0].PreviousOutPoint
   759  	aliceReports[output.String()] = &lnrpc.Resolution{
   760  		ResolutionType: lnrpc.ResolutionType_COMMIT,
   761  		Outcome:        lnrpc.ResolutionOutcome_CLAIMED,
   762  		SweepTxid:      sweepingTXID.String(),
   763  		Outpoint: &lnrpc.OutPoint{
   764  			TxidBytes:   output.Hash[:],
   765  			TxidStr:     output.Hash.String(),
   766  			OutputIndex: output.Index,
   767  		},
   768  		AmountAtoms: uint64(aliceBalance),
   769  	}
   770  
   771  	carolReports[carolCommit.OutPoint.String()] = &lnrpc.Resolution{
   772  		ResolutionType: lnrpc.ResolutionType_COMMIT,
   773  		Outcome:        lnrpc.ResolutionOutcome_CLAIMED,
   774  		Outpoint: &lnrpc.OutPoint{
   775  			TxidBytes:   carolCommit.OutPoint.Hash[:],
   776  			TxidStr:     carolCommit.OutPoint.Hash.String(),
   777  			OutputIndex: carolCommit.OutPoint.Index,
   778  		},
   779  		AmountAtoms: uint64(pushAmt),
   780  		SweepTxid:   carolCommit.SweepTx.TxHash().String(),
   781  	}
   782  
   783  	// Check that we can find the commitment sweep in our set of known
    784  	// sweeps, using the simple transaction id output of ListSweeps.
   785  	assertSweepFound(t.t, alice, sweepingTXID.String(), false)
   786  
   787  	// Restart Alice to ensure that she resumes watching the finalized
   788  	// commitment sweep txid.
   789  	if err := net.RestartNode(alice, nil); err != nil {
   790  		t.Fatalf("Node restart failed: %v", err)
   791  	}
   792  
    793  	// Next, we mine an additional block which should include the sweep
    794  	// transaction, as the input scripts and the sequence locks on the
    795  	// inputs should be properly met.
   796  	blockHash, err := net.Generate(1)
   797  	if err != nil {
   798  		t.Fatalf("unable to generate block: %v", err)
   799  	}
   800  	block, err := net.Miner.Node.GetBlock(context.Background(), blockHash[0])
   801  	if err != nil {
   802  		t.Fatalf("unable to get block: %v", err)
   803  	}
   804  
   805  	assertTxInBlock(t, block, sweepTx.Hash())
   806  
    807  	// Update the current height.
   808  	_, curHeight, err = net.Miner.Node.GetBestBlock(context.Background())
   809  	if err != nil {
   810  		t.Fatalf("unable to get best block height")
   811  	}
   812  
   813  	err = wait.Predicate(func() bool {
   814  		// Now that the commit output has been fully swept, check to see
   815  		// that the channel remains open for the pending htlc outputs.
   816  		ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
   817  		pendingChanResp, err := alice.PendingChannels(
   818  			ctxt, pendingChansRequest,
   819  		)
   820  		if err != nil {
   821  			predErr = fmt.Errorf("unable to query for pending "+
   822  				"channels: %v", err)
   823  			return false
   824  		}
   825  
   826  		err = checkNumForceClosedChannels(pendingChanResp, 1)
   827  		if err != nil {
   828  			predErr = err
   829  			return false
   830  		}
   831  
   832  		// The commitment funds will have been recovered after the
   833  		// commit txn was included in the last block. The htlc funds
   834  		// will be shown in limbo.
   835  		forceClose, err := findForceClosedChannel(pendingChanResp, &op)
   836  		if err != nil {
   837  			predErr = err
   838  			return false
   839  		}
   840  		predErr = checkPendingChannelNumHtlcs(forceClose, numInvoices)
   841  		if predErr != nil {
   842  			return false
   843  		}
   844  		predErr = checkPendingHtlcStageAndMaturity(
   845  			forceClose, 1, htlcExpiryHeight,
   846  			int32(htlcExpiryHeight-uint32(curHeight)),
   847  		)
   848  		if predErr != nil {
   849  			return false
   850  		}
   851  		if forceClose.LimboBalance == 0 {
   852  			predErr = fmt.Errorf("expected funds in limbo, found 0")
   853  			return false
   854  		}
   855  
   856  		return true
   857  	}, defaultTimeout)
   858  	if err != nil {
   859  		t.Fatalf(predErr.Error())
   860  	}
   861  
    862  	// Compute the height preceding that at which the htlc CLTV
   863  	// timeouts will expire. The outputs entered at the same height as the
   864  	// output spending from the commitment txn, so we must deduct the number
   865  	// of blocks we have generated since adding it to the nursery, and take
   866  	// an additional block off so that we end up one block shy of the expiry
   867  	// height, and add the block padding.
   868  	cltvHeightDelta := padCLTV(defaultCLTV - defaultCSV - 1 - 1)
   869  
    870  	// Advance the blockchain until just before the CLTV expires; nothing
    871  	// exciting should have happened during this time.
   872  	_, err = net.Generate(cltvHeightDelta)
   873  	if err != nil {
   874  		t.Fatalf("unable to generate block: %v", err)
   875  	}
   876  
    877  	// We now restart Alice, to ensure that she will broadcast the
    878  	// presigned htlc timeout txns after the delay expires, even after
    879  	// a restart while waiting for the htlc outputs to incubate.
   880  	if err := net.RestartNode(alice, nil); err != nil {
   881  		t.Fatalf("Node restart failed: %v", err)
   882  	}
   883  
   884  	// Alice should now see the channel in her set of pending force closed
   885  	// channels with one pending HTLC.
   886  	err = wait.NoError(func() error {
   887  		ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
   888  		pendingChanResp, err := alice.PendingChannels(
   889  			ctxt, pendingChansRequest,
   890  		)
   891  		if err != nil {
   892  			return fmt.Errorf("unable to query for pending "+
   893  				"channels: %v", err)
   894  		}
   895  
   896  		err = checkNumForceClosedChannels(pendingChanResp, 1)
   897  		if err != nil {
   898  			return err
   899  		}
   900  
   901  		forceClose, err := findForceClosedChannel(
   902  			pendingChanResp, &op,
   903  		)
   904  		if err != nil {
   905  			return err
   906  		}
   907  
   908  		// We should now be at the block just before the utxo nursery
   909  		// will attempt to broadcast the htlc timeout transactions.
   910  		err = checkPendingChannelNumHtlcs(forceClose, numInvoices)
   911  		if err != nil {
   912  			return err
   913  		}
   914  		err = checkPendingHtlcStageAndMaturity(
   915  			forceClose, 1, htlcExpiryHeight, 1,
   916  		)
   917  		if err != nil {
   918  			return err
   919  		}
   920  
   921  		// Now that our commitment confirmation depth has been
    922  		// surpassed, we should see a non-zero recovered balance.
   923  		// All htlc outputs are still left in limbo, so it should be
   924  		// non-zero as well.
   925  		if forceClose.LimboBalance == 0 {
   926  			return errors.New("htlc funds should still be in " +
   927  				"limbo")
   928  		}
   929  
   930  		return nil
   931  	}, defaultTimeout)
   932  	if err != nil {
   933  		t.Fatalf(err.Error())
   934  	}
   935  
   936  	// Now, generate the block which will cause Alice to broadcast the
   937  	// presigned htlc timeout txns.
   938  	_, err = net.Generate(1)
   939  	if err != nil {
   940  		t.Fatalf("unable to generate block: %v", err)
   941  	}
   942  
   943  	// Since Alice had numInvoices (6) htlcs extended to Carol before force
   944  	// closing, we expect Alice to broadcast an htlc timeout txn for each
   945  	// one.
   946  	expectedTxes = numInvoices
   947  
   948  	// In case of anchors, the timeout txs will be aggregated into one.
   949  	if channelType == lnrpc.CommitmentType_ANCHORS {
   950  		expectedTxes = 1
   951  	}
   952  
   953  	// Wait for them all to show up in the mempool.
   954  	htlcTxIDs, err := waitForNTxsInMempool(
   955  		net.Miner.Node, expectedTxes, minerMempoolTimeout,
   956  	)
   957  	if err != nil {
   958  		t.Fatalf("unable to find htlc timeout txns in mempool: %v", err)
   959  	}
   960  
   961  	// Retrieve each htlc timeout txn from the mempool, and ensure it is
    962  	// well-formed. This entails verifying that each only spends from one
    963  	// output, and that that output is from the commitment txn. In case
   964  	// this is an anchor channel, the transactions are aggregated by the
   965  	// sweeper into one.
   966  	numInputs := 1
   967  	if channelType == lnrpc.CommitmentType_ANCHORS {
   968  		numInputs = numInvoices + 1
   969  	}
   970  
   971  	// Construct a map of the already confirmed htlc timeout outpoints,
   972  	// that will count the number of times each is spent by the sweep txn.
   973  	// We prepopulate it in this way so that we can later detect if we are
   974  	// spending from an output that was not a confirmed htlc timeout txn.
   975  	var htlcTxOutpointSet = make(map[wire.OutPoint]int)
   976  
   977  	var htlcLessFees uint64
   978  	for _, htlcTxID := range htlcTxIDs {
    979  		// Fetch the sweep transaction; all of its inputs should
    980  		// spend from the commitment transaction which was broadcast
    981  		// on-chain. In case of an anchor type channel, we expect one
   982  		// extra input that is not spending from the commitment, that
   983  		// is added for fees.
   984  		htlcTx, err := net.Miner.Node.GetRawTransaction(context.Background(), htlcTxID)
   985  		if err != nil {
   986  			t.Fatalf("unable to fetch sweep tx: %v", err)
   987  		}
   988  
   989  		// Ensure the htlc transaction has the expected number of
   990  		// inputs.
   991  		inputs := htlcTx.MsgTx().TxIn
   992  		if len(inputs) != numInputs {
   993  			t.Fatalf("htlc transaction should only have %d txin, "+
   994  				"has %d", numInputs, len(htlcTx.MsgTx().TxIn))
   995  		}
   996  
   997  		// The number of outputs should be the same.
   998  		outputs := htlcTx.MsgTx().TxOut
   999  		if len(outputs) != numInputs {
  1000  			t.Fatalf("htlc transaction should only have %d"+
  1001  				"txout, has: %v", numInputs, len(outputs))
  1002  		}
  1003  
  1004  		// Ensure all the htlc transaction inputs are spending from the
  1005  		// commitment transaction, except if this is an extra input
  1006  		// added to pay for fees for anchor channels.
  1007  		nonCommitmentInputs := 0
  1008  		for i, txIn := range inputs {
  1009  			if !closingTxID.IsEqual(&txIn.PreviousOutPoint.Hash) {
  1010  				nonCommitmentInputs++
  1011  
  1012  				if nonCommitmentInputs > 1 {
  1013  					t.Fatalf("htlc transaction not "+
  1014  						"spending from commit "+
  1015  						"tx %v, instead spending %v",
  1016  						closingTxID,
  1017  						txIn.PreviousOutPoint)
  1018  				}
  1019  
  1020  				// This was an extra input added to pay fees,
  1021  				// continue to the next one.
  1022  				continue
  1023  			}
  1024  
  1025  			// For each htlc timeout transaction, we expect a
  1026  			// resolver report recording this on chain resolution
  1027  			// for both alice and carol.
  1028  			outpoint := txIn.PreviousOutPoint
  1029  			resolutionOutpoint := &lnrpc.OutPoint{
  1030  				TxidBytes:   outpoint.Hash[:],
  1031  				TxidStr:     outpoint.Hash.String(),
  1032  				OutputIndex: outpoint.Index,
  1033  			}
  1034  
  1035  			// We expect alice to have a timeout tx resolution with
  1036  			// an amount equal to the payment amount.
  1037  			aliceReports[outpoint.String()] = &lnrpc.Resolution{
  1038  				ResolutionType: lnrpc.ResolutionType_OUTGOING_HTLC,
  1039  				Outcome:        lnrpc.ResolutionOutcome_FIRST_STAGE,
  1040  				SweepTxid:      htlcTx.Hash().String(),
  1041  				Outpoint:       resolutionOutpoint,
  1042  				AmountAtoms:    uint64(paymentAmt),
  1043  			}
  1044  
  1045  			// We expect carol to have a resolution with an
  1046  			// incoming htlc timeout which reflects the full amount
  1047  			// of the htlc. It has no spend tx, because carol stops
  1048  			// monitoring the htlc once it has timed out.
  1049  			carolReports[outpoint.String()] = &lnrpc.Resolution{
  1050  				ResolutionType: lnrpc.ResolutionType_INCOMING_HTLC,
  1051  				Outcome:        lnrpc.ResolutionOutcome_TIMEOUT,
  1052  				SweepTxid:      "",
  1053  				Outpoint:       resolutionOutpoint,
  1054  				AmountAtoms:    uint64(paymentAmt),
  1055  			}
  1056  
  1057  			// Record the HTLC outpoint, such that we can later
  1058  			// check whether it gets swept.
  1059  			op := wire.OutPoint{
  1060  				Hash:  *htlcTxID,
  1061  				Index: uint32(i),
  1062  			}
  1063  			htlcTxOutpointSet[op] = 0
  1064  		}
  1065  
  1066  		// We record the htlc amount less fees here, so that we know
  1067  		// what value to expect for the second stage of our htlc
  1068  		// resolution.
  1069  		htlcLessFees = uint64(outputs[0].Value)
  1070  	}
  1071  
  1072  	// With the htlc timeout txns still in the mempool, we restart Alice to
  1073  	// verify that she can resume watching the htlc txns she broadcasted
  1074  	// before crashing.
  1075  	if err := net.RestartNode(alice, nil); err != nil {
  1076  		t.Fatalf("Node restart failed: %v", err)
  1077  	}
  1078  
  1079  	// Generate a block that mines the htlc timeout txns. Doing so now
  1080  	// activates the 2nd-stage CSV delayed outputs.
  1081  	_, err = net.Generate(1)
  1082  	if err != nil {
  1083  		t.Fatalf("unable to generate block: %v", err)
  1084  	}
  1085  
  1086  	// Alice is restarted here to ensure that she promptly moved the crib
  1087  	// outputs to the kindergarten bucket after the htlc timeout txns were
  1088  	// confirmed.
  1089  	if err := net.RestartNode(alice, nil); err != nil {
  1090  		t.Fatalf("Node restart failed: %v", err)
  1091  	}
  1092  
  1093  	// Advance the chain until just before the 2nd-layer CSV delays expire.
  1094  	// For anchor channels this is one block earlier.
  1095  	numBlocks := uint32(defaultCSV - 1)
  1096  	if channelType == lnrpc.CommitmentType_ANCHORS {
  1097  		numBlocks = defaultCSV - 2
  1098  
  1099  	}
  1100  	_, err = net.Generate(numBlocks)
  1101  	if err != nil {
  1102  		t.Fatalf("unable to generate block: %v", err)
  1103  	}
  1104  
  1105  	// Restart Alice to ensure that she can recover from a failure before
  1106  	// having graduated the htlc outputs in the kindergarten bucket.
  1107  	if err := net.RestartNode(alice, nil); err != nil {
  1108  		t.Fatalf("Node restart failed: %v", err)
  1109  	}
  1110  
  1111  	// Now that the channel has been fully swept, it should no longer show
  1112  	// incubated; check to see that Alice's node still reports the channel
  1113  	// as pending force closed.
  1114  	err = wait.Predicate(func() bool {
  1115  		ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
  1116  		pendingChanResp, err = alice.PendingChannels(
  1117  			ctxt, pendingChansRequest,
  1118  		)
  1119  		if err != nil {
  1120  			predErr = fmt.Errorf("unable to query for pending "+
  1121  				"channels: %v", err)
  1122  			return false
  1123  		}
  1124  		err = checkNumForceClosedChannels(pendingChanResp, 1)
  1125  		if err != nil {
  1126  			predErr = err
  1127  			return false
  1128  		}
  1129  
  1130  		forceClose, err := findForceClosedChannel(pendingChanResp, &op)
  1131  		if err != nil {
  1132  			predErr = err
  1133  			return false
  1134  		}
  1135  
  1136  		if forceClose.LimboBalance == 0 {
  1137  			predErr = fmt.Errorf("htlc funds should still be in limbo")
  1138  			return false
  1139  		}
  1140  
  1141  		predErr = checkPendingChannelNumHtlcs(forceClose, numInvoices)
  1142  		return predErr == nil
  1143  	}, defaultTimeout)
  1144  	if err != nil {
  1145  		t.Fatalf(predErr.Error())
  1146  	}
  1147  
  1148  	// Generate a block that causes Alice to sweep the htlc outputs in the
  1149  	// kindergarten bucket.
  1150  	_, err = net.Generate(1)
  1151  	if err != nil {
  1152  		t.Fatalf("unable to generate block: %v", err)
  1153  	}
  1154  
  1155  	// Wait for the single sweep txn to appear in the mempool.
  1156  	htlcSweepTxID, err := waitForTxInMempool(
  1157  		net.Miner.Node, minerMempoolTimeout,
  1158  	)
  1159  	if err != nil {
  1160  		t.Fatalf("failed to get sweep tx from mempool: %v", err)
  1161  	}
  1162  
  1163  	// Fetch the htlc sweep transaction from the mempool.
  1164  	htlcSweepTx, err := net.Miner.Node.GetRawTransaction(context.Background(), htlcSweepTxID)
  1165  	if err != nil {
  1166  		t.Fatalf("unable to fetch sweep tx: %v", err)
  1167  	}
  1168  	// Ensure the htlc sweep transaction only has one input for each htlc
  1169  	// Alice extended before force closing.
  1170  	if len(htlcSweepTx.MsgTx().TxIn) != numInvoices {
  1171  		t.Fatalf("htlc transaction should have %d txin, "+
  1172  			"has %d", numInvoices, len(htlcSweepTx.MsgTx().TxIn))
  1173  	}
  1174  	outputCount := len(htlcSweepTx.MsgTx().TxOut)
  1175  	if outputCount != 1 {
  1176  		t.Fatalf("htlc sweep transaction should have one output, has: "+
  1177  			"%v", outputCount)
  1178  	}
  1179  
  1180  	// Ensure that each input spends from exactly one htlc timeout output.
  1181  	for _, txIn := range htlcSweepTx.MsgTx().TxIn {
  1182  		outpoint := txIn.PreviousOutPoint
  1183  		// Check that the input is a confirmed htlc timeout txn.
  1184  		if _, ok := htlcTxOutpointSet[outpoint]; !ok {
  1185  			t.Fatalf("htlc sweep output not spending from htlc "+
  1186  				"tx, instead spending output %v", outpoint)
  1187  		}
  1188  		// Increment our count for how many times this output was spent.
  1189  		htlcTxOutpointSet[outpoint]++
  1190  
  1191  		// Check that each is only spent once.
  1192  		if htlcTxOutpointSet[outpoint] > 1 {
  1193  			t.Fatalf("htlc sweep tx has multiple spends from "+
  1194  				"outpoint %v", outpoint)
  1195  		}
  1196  
  1197  		// Since we have now swept our htlc timeout tx, we expect to
  1198  		// have timeout resolutions for each of our htlcs.
  1199  		output := txIn.PreviousOutPoint
  1200  		aliceReports[output.String()] = &lnrpc.Resolution{
  1201  			ResolutionType: lnrpc.ResolutionType_OUTGOING_HTLC,
  1202  			Outcome:        lnrpc.ResolutionOutcome_TIMEOUT,
  1203  			SweepTxid:      htlcSweepTx.Hash().String(),
  1204  			Outpoint: &lnrpc.OutPoint{
  1205  				TxidBytes:   output.Hash[:],
  1206  				TxidStr:     output.Hash.String(),
  1207  				OutputIndex: output.Index,
  1208  			},
  1209  			AmountAtoms: htlcLessFees,
  1210  		}
  1211  	}
  1212  
  1213  	// Check that each HTLC output was spent exactly once.
  1214  	for op, num := range htlcTxOutpointSet {
  1215  		if num != 1 {
  1216  			t.Fatalf("HTLC outpoint %v was spent %v times", op, num)
  1217  		}
  1218  	}
  1219  
  1220  	// Check that we can find the htlc sweep in our set of known sweeps,
  1221  	// using the verbose output of ListSweeps.
  1222  	assertSweepFound(t.t, alice, htlcSweepTx.Hash().String(), true)
  1223  
  1224  	// The following restart checks to ensure that the nursery store is
  1225  	// storing the txid of the previously broadcast htlc sweep txn, and that
  1226  	// it begins watching that txid after restarting.
  1227  	if err := net.RestartNode(alice, nil); err != nil {
  1228  		t.Fatalf("Node restart failed: %v", err)
  1229  	}
  1230  
  1231  	// Now that the channel has been fully swept, it should no longer show
  1232  	// incubated; check to see that Alice's node still reports the channel
  1233  	// as pending force closed.
  1234  	err = wait.Predicate(func() bool {
  1235  		ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
  1236  		pendingChanResp, err := alice.PendingChannels(
  1237  			ctxt, pendingChansRequest,
  1238  		)
  1239  		if err != nil {
  1240  			predErr = fmt.Errorf("unable to query for pending "+
  1241  				"channels: %v", err)
  1242  			return false
  1243  		}
  1244  		err = checkNumForceClosedChannels(pendingChanResp, 1)
  1245  		if err != nil {
  1246  			predErr = err
  1247  			return false
  1248  		}
  1249  
  1250  		// All htlcs should show zero blocks until maturity, as
  1251  		// evidenced by having checked the sweep transaction in the
  1252  		// mempool.
  1253  		forceClose, err := findForceClosedChannel(pendingChanResp, &op)
  1254  		if err != nil {
  1255  			predErr = err
  1256  			return false
  1257  		}
  1258  		predErr = checkPendingChannelNumHtlcs(forceClose, numInvoices)
  1259  		if predErr != nil {
  1260  			return false
  1261  		}
  1262  		err = checkPendingHtlcStageAndMaturity(
  1263  			forceClose, 2, htlcCsvMaturityHeight, 0,
  1264  		)
  1265  		if err != nil {
  1266  			predErr = err
  1267  			return false
  1268  		}
  1269  
  1270  		return true
  1271  	}, defaultTimeout)
  1272  	if err != nil {
  1273  		t.Fatalf(predErr.Error())
  1274  	}
  1275  
  1276  	// Generate the final block that sweeps all htlc funds into the user's
  1277  	// wallet, and make sure the sweep is in this block.
  1278  	block = mineBlocks(t, net, 1, 1)[0]
  1279  	assertTxInBlock(t, block, htlcSweepTxID)
  1280  
  1281  	// Now that the channel has been fully swept, it should no longer show
  1282  	// up within the pending channels RPC.
  1283  	err = wait.Predicate(func() bool {
  1284  		ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
  1285  		pendingChanResp, err := alice.PendingChannels(
  1286  			ctxt, pendingChansRequest,
  1287  		)
  1288  		if err != nil {
  1289  			predErr = fmt.Errorf("unable to query for pending "+
  1290  				"channels: %v", err)
  1291  			return false
  1292  		}
  1293  
  1294  		predErr = checkNumForceClosedChannels(pendingChanResp, 0)
  1295  		if predErr != nil {
  1296  			return false
  1297  		}
  1298  
  1299  		// In addition to there being no pending channels, we verify
  1300  		// that the pending channels RPC does not report any money
  1301  		// still in limbo.
  1302  		if pendingChanResp.TotalLimboBalance != 0 {
  1303  			predErr = errors.New("no user funds should be left " +
  1304  				"in limbo after incubation")
  1305  			return false
  1306  		}
  1307  
  1308  		return true
  1309  	}, defaultTimeout)
  1310  	if err != nil {
  1311  		t.Fatalf(predErr.Error())
  1312  	}
  1313  
  1314  	// At this point, Carol should now be aware of her new immediately
  1315  	// spendable on-chain balance, as it was Alice who broadcast the
  1316  	// commitment transaction.
  1317  	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
  1318  	carolBalResp, err = carol.WalletBalance(ctxt, carolBalReq)
  1319  	require.NoError(t.t, err, "unable to get carol's balance")
  1320  
  1321  	// Carol's expected balance should be her starting balance plus the
  1322  	// push amount sent by Alice, minus the miner fees paid.
  1323  	carolExpectedBalance := dcrutil.Amount(carolStartingBalance) +
  1324  		pushAmt - totalFeeCarol
  1325  
  1326  	// In addition, if this is an anchor-enabled channel, further add the
  1327  	// anchor size.
  1328  	if channelType == lnrpc.CommitmentType_ANCHORS {
  1329  		carolExpectedBalance += dcrutil.Amount(anchorSize)
  1330  	}
  1331  
  1332  	require.Equal(
  1333  		t.t, carolExpectedBalance,
  1334  		dcrutil.Amount(carolBalResp.ConfirmedBalance),
  1335  		"carol's balance is incorrect",
  1336  	)
  1337  
  1338  	// Finally, we check that alice and carol have the set of resolutions
  1339  	// we expect.
  1340  	assertReports(t, alice, op, aliceReports)
  1341  	assertReports(t, carol, op, carolReports)
  1342  }
  1343  
  1344  // padCLTV is a small helper function that pads a cltv value with a block
  1345  // padding.
  1346  func padCLTV(cltv uint32) uint32 {
  1347  	return cltv + uint32(routing.BlockPadding)
  1348  }
  1349  
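        // sweptOutput pairs an outpoint that was swept with the transaction
        // that swept it.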
  1350  type sweptOutput struct {
  1351  	OutPoint wire.OutPoint
  1352  	SweepTx  *wire.MsgTx
  1353  }
  1354  
  1355  // findCommitAndAnchor looks for a commitment sweep and anchor sweep in the
  1356  // mempool. Our anchor output is identified by having multiple inputs, because
  1357  // we have to bring another input to add fees to the anchor. Note that the
  1358  // anchor swept output may be nil if the channel did not have anchors.
  1359  func findCommitAndAnchor(t *harnessTest, net *lntest.NetworkHarness,
  1360  	sweepTxns []*wire.MsgTx, closeTx string) (*sweptOutput, *sweptOutput) {
  1361  
  1362  	var commitSweep, anchorSweep *sweptOutput
  1363  	ctxb := context.Background()
  1364  
  1365  	for _, tx := range sweepTxns {
  1366  		txHash := tx.TxHash()
  1367  		ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
  1368  		sweepTx, err := net.Miner.Node.GetRawTransaction(ctxt, &txHash)
  1369  		require.NoError(t.t, err)
  1370  
  1371  		// For anchor detection in dcrlnd we do a slightly different
  1372  		// procedure because CPFP isn't fully integrated and due to fee
  1373  		// differences, anchor outputs behave slightly different in
  1374  		// some sweep situations (they end up swept along with the
  1375  		// commitment output).
  1376  		inputs := sweepTx.MsgTx().TxIn
  1377  		for _, txin := range inputs {
  1378  			// If the input spends from the close tx and its
  1379  			// ValueIn has the size of an anchor output, then it's
  1380  			// sweeping the anchor output.
  1381  			//
  1382  			// Otherwise, if the input spends from the close tx,
  1383  			// then this is sweeping the commitment output.
  1384  			//
  1385  			// Note: this logic is slightly broken in that it
  1386  			// _could_ also be sweeping an htlc, but this function
  1387  			// is only called in situations where sweepTxns only
  1388  			// contains commitment or anchor sweeping txs.
  1389  			if txin.ValueIn == anchorSize && txin.PreviousOutPoint.Hash.String() == closeTx {
  1390  				anchorSweep = &sweptOutput{
  1391  					OutPoint: txin.PreviousOutPoint,
  1392  					SweepTx:  tx,
  1393  				}
  1394  
  1395  			} else if txin.PreviousOutPoint.Hash.String() == closeTx {
  1396  				commitSweep = &sweptOutput{
  1397  					OutPoint: txin.PreviousOutPoint,
  1398  					SweepTx:  tx,
  1399  				}
  1400  			}
  1401  		}
  1402  	}
  1403  
  1404  	return commitSweep, anchorSweep
  1405  }
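
// A minimal call-site sketch (hypothetical variable names, not taken from
// the tests in this file): once a force close confirms and the sweeps enter
// the mempool, they can be classified with
//
//	commitSweep, anchorSweep := findCommitAndAnchor(
//		t, net, sweepTxns, closeTxid.String(),
//	)
//
// where sweepTxns holds the unconfirmed sweep transactions and closeTxid is
// the hash of the closing transaction. anchorSweep is nil for non-anchor
// channels.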
  1406  
  1407  // testFailingChannel tests that we will fail the channel by force closing it
  1408  // in the case where a counterparty tries to settle an HTLC with the wrong
  1409  // preimage.
  1410  func testFailingChannel(net *lntest.NetworkHarness, t *harnessTest) {
  1411  	ctxb := context.Background()
  1412  
  1413  	const (
  1414  		paymentAmt = 10000
  1415  	)
  1416  
  1417  	chanAmt := defaultChanAmt
  1418  
  1419  	// We'll introduce Carol, which will settle any incoming invoice with a
  1420  	// totally unrelated preimage.
  1421  	carol := net.NewNode(t.t, "Carol", []string{"--hodl.bogus-settle"})
  1422  	defer shutdownAndAssert(net, t, carol)
  1423  
  1424  	// Let Alice connect and open a channel to Carol.
  1425  	net.ConnectNodes(t.t, net.Alice, carol)
  1426  	chanPoint := openChannelAndAssert(
  1427  		t, net, net.Alice, carol,
  1428  		lntest.OpenChannelParams{
  1429  			Amt: chanAmt,
  1430  		},
  1431  	)
  1432  
  1433  	// With the channel open, we'll create an invoice for Carol that Alice
  1434  	// will attempt to pay.
  1435  	preimage := bytes.Repeat([]byte{byte(192)}, 32)
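	// Carol runs with --hodl.bogus-settle, so she will answer Alice's
	// payment with an unrelated preimage instead of this one.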
  1436  	invoice := &lnrpc.Invoice{
  1437  		Memo:      "testing",
  1438  		RPreimage: preimage,
  1439  		Value:     paymentAmt,
  1440  	}
  1441  	ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
  1442  	resp, err := carol.AddInvoice(ctxt, invoice)
  1443  	if err != nil {
  1444  		t.Fatalf("unable to add invoice: %v", err)
  1445  	}
  1446  	carolPayReqs := []string{resp.PaymentRequest}
  1447  
  1448  	// Wait for Alice to receive the channel edge from the funding manager.
  1449  	err = net.Alice.WaitForNetworkChannelOpen(chanPoint)
  1450  	if err != nil {
  1451  		t.Fatalf("alice didn't see the alice->carol channel before "+
  1452  			"timeout: %v", err)
  1453  	}
  1454  
  1455  	// Send the payment from Alice to Carol. We expect Carol to attempt to
  1456  	// settle this payment with the wrong preimage.
  1457  	err = completePaymentRequests(
  1458  		net.Alice, net.Alice.RouterClient, carolPayReqs, false,
  1459  	)
  1460  	if err != nil {
  1461  		t.Fatalf("unable to send payments: %v", err)
  1462  	}
  1463  
  1464  	// Since Alice detects that Carol is trying to trick her by providing a
  1465  	// fake preimage, she should fail and force close the channel.
  1466  	var predErr error
  1467  	err = wait.Predicate(func() bool {
  1468  		pendingChansRequest := &lnrpc.PendingChannelsRequest{}
  1469  		ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
  1470  		pendingChanResp, err := net.Alice.PendingChannels(ctxt,
  1471  			pendingChansRequest)
  1472  		if err != nil {
  1473  			predErr = fmt.Errorf("unable to query for pending "+
  1474  				"channels: %v", err)
  1475  			return false
  1476  		}
  1477  		n := len(pendingChanResp.WaitingCloseChannels)
  1478  		if n != 1 {
  1479  			predErr = fmt.Errorf("expected to find %d channels "+
  1480  				"waiting close, found %d", 1, n)
  1481  			return false
  1482  		}
  1483  		return true
  1484  	}, defaultTimeout)
  1485  	if err != nil {
  1486  		t.Fatalf("%v", predErr)
  1487  	}
  1488  
  1489  	// Mine a block to confirm the broadcasted commitment.
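	// The block is expected to contain exactly two transactions: the
	// coinbase and the commitment transaction.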
  1490  	block := mineBlocks(t, net, 1, 1)[0]
  1491  	if len(block.Transactions) != 2 {
  1492  		t.Fatalf("commitment transaction wasn't mined")
  1493  	}
  1494  
  1495  	// The channel should now show up as force closed both for Alice and
  1496  	// Carol.
  1497  	err = wait.Predicate(func() bool {
  1498  		pendingChansRequest := &lnrpc.PendingChannelsRequest{}
  1499  		ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
  1500  		pendingChanResp, err := net.Alice.PendingChannels(ctxt,
  1501  			pendingChansRequest)
  1502  		if err != nil {
  1503  			predErr = fmt.Errorf("unable to query for pending "+
  1504  				"channels: %v", err)
  1505  			return false
  1506  		}
  1507  		n := len(pendingChanResp.WaitingCloseChannels)
  1508  		if n != 0 {
  1509  			predErr = fmt.Errorf("expected to find %d channels "+
  1510  				"waiting close, found %d", 0, n)
  1511  			return false
  1512  		}
  1513  		n = len(pendingChanResp.PendingForceClosingChannels)
  1514  		if n != 1 {
  1515  			predErr = fmt.Errorf("expected to find %d channel "+
  1516  				"pending force close, found %d", 1, n)
  1517  			return false
  1518  		}
  1519  		return true
  1520  	}, defaultTimeout)
  1521  	if err != nil {
  1522  		t.Fatalf("%v", predErr)
  1523  	}
  1524  
  1525  	err = wait.Predicate(func() bool {
  1526  		pendingChansRequest := &lnrpc.PendingChannelsRequest{}
  1527  		ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
  1528  		pendingChanResp, err := carol.PendingChannels(ctxt,
  1529  			pendingChansRequest)
  1530  		if err != nil {
  1531  			predErr = fmt.Errorf("unable to query for pending "+
  1532  				"channels: %v", err)
  1533  			return false
  1534  		}
  1535  		n := len(pendingChanResp.PendingForceClosingChannels)
  1536  		if n != 1 {
  1537  			predErr = fmt.Errorf("expected to find %d channel "+
  1538  				"pending force close, found %d", 1, n)
  1539  			return false
  1540  		}
  1541  		return true
  1542  	}, defaultTimeout)
  1543  	if err != nil {
  1544  		t.Fatalf("%v", predErr)
  1545  	}
  1546  
  1547  	// Carol will use the correct preimage to resolve the HTLC on-chain.
  1548  	_, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
  1549  	if err != nil {
  1550  		t.Fatalf("unable to find Carol's resolve tx in mempool: %v", err)
  1551  	}
  1552  
  1553  	// Mine enough blocks for Alice to sweep her funds from the force
  1554  	// closed channel.
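	// The commitment already has one confirmation from the block mined
	// above, so only defaultCSV-1 additional blocks are required.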
  1555  	_, err = net.Generate(defaultCSV - 1)
  1556  	if err != nil {
  1557  		t.Fatalf("unable to generate blocks: %v", err)
  1558  	}
  1559  
  1560  	// Wait for the sweeping tx to be broadcast.
  1561  	_, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
  1562  	if err != nil {
  1563  		t.Fatalf("unable to find Alice's sweep tx in mempool: %v", err)
  1564  	}
  1565  
  1566  	// Mine the sweep.
  1567  	_, err = net.Generate(1)
  1568  	if err != nil {
  1569  		t.Fatalf("unable to generate blocks: %v", err)
  1570  	}
  1571  
  1572  	// No pending channels should be left.
  1573  	err = wait.Predicate(func() bool {
  1574  		pendingChansRequest := &lnrpc.PendingChannelsRequest{}
  1575  		ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
  1576  		pendingChanResp, err := net.Alice.PendingChannels(ctxt,
  1577  			pendingChansRequest)
  1578  		if err != nil {
  1579  			predErr = fmt.Errorf("unable to query for pending "+
  1580  				"channels: %v", err)
  1581  			return false
  1582  		}
  1583  		n := len(pendingChanResp.PendingForceClosingChannels)
  1584  		if n != 0 {
  1585  			predErr = fmt.Errorf("expected to find %d channel "+
  1586  				"pending force close, found %d", 0, n)
  1587  			return false
  1588  		}
  1589  		return true
  1590  	}, defaultTimeout)
  1591  	if err != nil {
  1592  		t.Fatalf("%v", predErr)
  1593  	}
  1594  }
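
// assertNumPendingForceClose is a hypothetical helper sketch (not used by
// the tests above) showing how the repeated wait.Predicate polling blocks
// in this file could be factored out: it queries a node's PendingChannels
// RPC until the number of channels pending force close matches the expected
// count. The helper's name and exact shape are assumptions, not part of the
// original tests.
func assertNumPendingForceClose(t *harnessTest, node *lntest.HarnessNode,
	expected int) {

	ctxb := context.Background()

	var predErr error
	err := wait.Predicate(func() bool {
		ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
		defer cancel()

		pendingChanResp, err := node.PendingChannels(
			ctxt, &lnrpc.PendingChannelsRequest{},
		)
		if err != nil {
			predErr = fmt.Errorf("unable to query for pending "+
				"channels: %v", err)
			return false
		}

		n := len(pendingChanResp.PendingForceClosingChannels)
		if n != expected {
			predErr = fmt.Errorf("expected to find %d channels "+
				"pending force close, found %d", expected, n)
			return false
		}
		return true
	}, defaultTimeout)
	if err != nil {
		t.Fatalf("%v", predErr)
	}
}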