github.com/decred/dcrlnd@v0.7.6/lntest/itest/assertions.go

     1  package itest
     2  
     3  import (
     4  	"context"
     5  	"encoding/hex"
     6  	"fmt"
     7  	"math"
     8  	"sync/atomic"
     9  	"testing"
    10  	"time"
    11  
    12  	"github.com/decred/dcrd/chaincfg/chainhash"
    13  	"github.com/decred/dcrd/dcrutil/v4"
    14  	jsonrpctypes "github.com/decred/dcrd/rpc/jsonrpc/types/v4"
    15  	"github.com/decred/dcrd/rpcclient/v8"
    16  	"github.com/decred/dcrd/wire"
    17  	"github.com/decred/dcrlnd/channeldb"
    18  	"github.com/decred/dcrlnd/lnrpc"
    19  	"github.com/decred/dcrlnd/lnrpc/routerrpc"
    20  	"github.com/decred/dcrlnd/lnrpc/walletrpc"
    21  	"github.com/decred/dcrlnd/lntest"
    22  	"github.com/decred/dcrlnd/lntest/wait"
    23  	"github.com/decred/dcrlnd/sweep"
    24  	"github.com/go-errors/errors"
    25  	"github.com/stretchr/testify/require"
    26  	"google.golang.org/protobuf/proto"
    27  )
    28  
    29  // openChannelStream blocks until an OpenChannel request for a channel funded
    30  // by Alice succeeds. If it does, a stream client is returned to receive events
    31  // about the opening channel.
    32  func openChannelStream(t *harnessTest, net *lntest.NetworkHarness,
    33  	alice, bob *lntest.HarnessNode,
    34  	p lntest.OpenChannelParams) lnrpc.Lightning_OpenChannelClient {
    35  
    36  	t.t.Helper()
    37  
    38  	// Wait until we are able to fund a channel successfully. This wait
    39  	// prevents us from erroring out when trying to create a channel while
    40  	// the node is starting up.
    41  	var chanOpenUpdate lnrpc.Lightning_OpenChannelClient
    42  	err := wait.NoError(func() error {
    43  		var err error
    44  		chanOpenUpdate, err = net.OpenChannel(alice, bob, p)
    45  		return err
    46  	}, defaultTimeout)
    47  	require.NoError(t.t, err, "unable to open channel")
    48  
    49  	return chanOpenUpdate
    50  }
    51  
    52  // openChannelAndAssert attempts to open a channel with the specified
    53  // parameters extended from Alice to Bob. Additionally, two items are asserted
    54  // after the channel is considered open: the funding transaction should be
    55  // found within a block, and that Alice can report the status of the new
    56  // channel.
    57  func openChannelAndAssert(t *harnessTest, net *lntest.NetworkHarness,
    58  	alice, bob *lntest.HarnessNode,
    59  	p lntest.OpenChannelParams) *lnrpc.ChannelPoint {
    60  
    61  	t.t.Helper()
    62  
    63  	chanOpenUpdate := openChannelStream(t, net, alice, bob, p)
    64  
    65  	// Mine 6 blocks, then wait for Alice's node to notify us that the
    66  	// channel has been opened. The funding transaction should be found
    67  	// within the first newly mined block. We mine 6 blocks so that in the
    68  	// case that the channel is public, it is announced to the network.
    69  	block := mineBlocks(t, net, 6, 1)[0]
    70  
    71  	fundingChanPoint, err := net.WaitForChannelOpen(chanOpenUpdate)
    72  	require.NoError(t.t, err, "error while waiting for channel open")
    73  
    74  	fundingTxID, err := lnrpc.GetChanPointFundingTxid(fundingChanPoint)
    75  	require.NoError(t.t, err, "unable to get txid")
    76  
    77  	assertTxInBlock(t, block, fundingTxID)
    78  
    79  	// The channel should be listed in the peer information returned by
    80  	// both peers.
    81  	chanPoint := wire.OutPoint{
    82  		Hash:  *fundingTxID,
    83  		Index: fundingChanPoint.OutputIndex,
    84  	}
    85  	require.NoError(
    86  		t.t, net.AssertChannelExists(alice, &chanPoint),
    87  		"unable to assert channel existence",
    88  	)
    89  	require.NoError(
    90  		t.t, net.AssertChannelExists(bob, &chanPoint),
    91  		"unable to assert channel existence",
    92  	)
    93  
    94  	// They should also notice this channel via the topology subscription.
    95  	err = alice.WaitForNetworkChannelOpen(fundingChanPoint)
    96  	require.Nil(t.t, err, "%s did not see channel open: %v", alice.Name(), err)
    97  
    98  	err = bob.WaitForNetworkChannelOpen(fundingChanPoint)
    99  	require.Nil(t.t, err, "%s did not see channel open: %v", bob.Name(), err)
   100  
   101  	return fundingChanPoint
   102  }
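
// Example usage (editorial sketch, not part of the original file): a minimal
// open/close round trip built from the helpers in this file. It assumes the
// standard harness nodes net.Alice and net.Bob and that
// lntest.OpenChannelParams exposes an Amt field; adapt to the calling test's
// actual wiring.
//
//	chanAmt := dcrutil.Amount(1000000)
//	chanPoint := openChannelAndAssert(
//		t, net, net.Alice, net.Bob,
//		lntest.OpenChannelParams{Amt: chanAmt},
//	)
//	// ... exercise the channel ...
//	closeChannelAndAssert(t, net, net.Alice, chanPoint, false)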
   103  
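// waitForGraphSync waits until the given node reports that it is synced to
// the channel graph.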
   104  func waitForGraphSync(t *harnessTest, node *lntest.HarnessNode) {
   105  	t.t.Helper()
   106  
   107  	err := wait.Predicate(func() bool {
   108  		ctxb := context.Background()
   109  		ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
   110  		resp, err := node.GetInfo(ctxt, &lnrpc.GetInfoRequest{})
   111  		require.NoError(t.t, err)
   112  
   113  		return resp.SyncedToGraph
   114  	}, defaultTimeout)
   115  	require.NoError(t.t, err)
   116  }
   117  
   118  // closeChannelAndAssert attempts to close a channel identified by the passed
   119  // channel point owned by the passed Lightning node. A fully blocking channel
   120  // closure is attempted, bounded internally by channelCloseTimeout.
   121  // Additionally, once the channel has been detected as closed, an assertion
   122  // checks that the closing transaction is found within a block. Finally, this
   123  // assertion verifies that the node always sends out a disable update when
   124  // closing the channel if the channel was previously enabled.
   125  //
   126  // NOTE: This method assumes that the provided funding point is confirmed
   127  // on-chain AND that the edge exists in the node's channel graph. If the
   128  // funding transaction was reorged out at some point, use
   129  // closeReorgedChannelAndAssert.
   130  func closeChannelAndAssert(t *harnessTest, net *lntest.NetworkHarness,
   131  	node *lntest.HarnessNode, fundingChanPoint *lnrpc.ChannelPoint,
   132  	force bool) *chainhash.Hash {
   133  
   134  	return closeChannelAndAssertType(
   135  		t, net, node, fundingChanPoint, false, force,
   136  	)
   137  }
   138  
   139  func closeChannelAndAssertType(t *harnessTest,
   140  	net *lntest.NetworkHarness, node *lntest.HarnessNode,
   141  	fundingChanPoint *lnrpc.ChannelPoint,
   142  	anchors, force bool) *chainhash.Hash {
   143  
   144  	// If this is not a force close, we'll wait a few seconds for the
   145  	// channel to finish settling any outstanding HTLCs that might still be
   146  	// in flight.
   147  	if !force {
   148  		err := waitForPendingHtlcs(node, fundingChanPoint, 0)
   149  		if err != nil {
   150  			t.Fatalf("co-op channel close attempt with active htlcs: %v", err)
   151  		}
   152  	}
   153  
   154  	ctxb := context.Background()
   155  	ctxt, cancel := context.WithTimeout(ctxb, channelCloseTimeout)
   156  	defer cancel()
   157  
   158  	// Fetch the current channel policy. If the channel is currently
   159  	// enabled, we will register for graph notifications before closing to
   160  	// assert that the node sends out a disabling update as a result of the
   161  	// channel being closed.
   162  	curPolicy := getChannelPolicies(
   163  		t, node, node.PubKeyStr, fundingChanPoint,
   164  	)[0]
   165  	expectDisable := !curPolicy.Disabled
   166  
   167  	closeUpdates, _, err := net.CloseChannel(node, fundingChanPoint, force)
   168  	require.NoError(t.t, err, "unable to close channel %s from %s (force=%v)",
   169  		chanPointFundingToOutpoint(fundingChanPoint), node.Name(), force)
   170  
   171  	// If the channel policy was enabled prior to the closure, wait until we
   172  	// received the disabled update.
   173  	if expectDisable {
   174  		curPolicy.Disabled = true
   175  		assertChannelPolicyUpdate(
   176  			t.t, node, node.PubKeyStr,
   177  			curPolicy, fundingChanPoint, false,
   178  		)
   179  	}
   180  
   181  	return assertChannelClosed(
   182  		ctxt, t, net, node, fundingChanPoint, anchors, closeUpdates,
   183  	)
   184  }
   185  
   186  // closeReorgedChannelAndAssert attempts to close a channel identified by the
   187  // passed channel point owned by the passed Lightning node. A fully blocking
   188  // channel closure is attempted, bounded internally by channelCloseTimeout.
   189  // Additionally, once the channel has been detected as closed, an assertion
   190  // checks that the closing transaction is found within a block mined after
   191  // the closure was initiated.
   192  //
   193  // NOTE: This method does not verify that the node sends a disable update for
   194  // the closed channel.
   195  func closeReorgedChannelAndAssert(t *harnessTest,
   196  	net *lntest.NetworkHarness, node *lntest.HarnessNode,
   197  	fundingChanPoint *lnrpc.ChannelPoint, force bool) *chainhash.Hash {
   198  
   199  	ctxb := context.Background()
   200  	ctx, cancel := context.WithTimeout(ctxb, channelCloseTimeout)
   201  	defer cancel()
   202  
   203  	closeUpdates, _, err := net.CloseChannel(node, fundingChanPoint, force)
   204  	require.NoError(t.t, err, "unable to close channel")
   205  
   206  	return assertChannelClosed(
   207  		ctx, t, net, node, fundingChanPoint, false, closeUpdates,
   208  	)
   209  }
   210  
   211  // assertChannelClosed asserts that the channel is properly cleaned up after
   212  // initiating a cooperative or local close.
   213  func assertChannelClosed(ctx context.Context, t *harnessTest,
   214  	net *lntest.NetworkHarness, node *lntest.HarnessNode,
   215  	fundingChanPoint *lnrpc.ChannelPoint, anchors bool,
   216  	closeUpdates lnrpc.Lightning_CloseChannelClient) *chainhash.Hash {
   217  
   218  	txid, err := lnrpc.GetChanPointFundingTxid(fundingChanPoint)
   219  	require.NoError(t.t, err, "unable to get txid")
   220  	chanPointStr := fmt.Sprintf("%v:%v", txid, fundingChanPoint.OutputIndex)
   221  
   222  	// If the channel appears in list channels, ensure that its state
   223  	// contains ChanStatusCoopBroadcasted.
   224  	listChansRequest := &lnrpc.ListChannelsRequest{}
   225  	listChansResp, err := node.ListChannels(ctx, listChansRequest)
   226  	require.NoError(t.t, err, "unable to query for list channels")
   227  
   228  	for _, channel := range listChansResp.Channels {
   229  		// Skip other channels.
   230  		if channel.ChannelPoint != chanPointStr {
   231  			continue
   232  		}
   233  
   234  		// Assert that the channel is in coop broadcasted.
   235  		require.Contains(
   236  			t.t, channel.ChanStatusFlags,
   237  			channeldb.ChanStatusCoopBroadcasted.String(),
   238  			"channel not coop broadcasted",
   239  		)
   240  	}
   241  
   242  	// At this point, the channel should now be marked as being in the
   243  	// state of "waiting close".
   244  	pendingChansRequest := &lnrpc.PendingChannelsRequest{}
   245  	pendingChanResp, err := node.PendingChannels(ctx, pendingChansRequest)
   246  	require.NoError(t.t, err, "unable to query for pending channels")
   247  
   248  	var found bool
   249  	for _, pendingClose := range pendingChanResp.WaitingCloseChannels {
   250  		if pendingClose.Channel.ChannelPoint == chanPointStr {
   251  			found = true
   252  			break
   253  		}
   254  	}
   255  	require.True(t.t, found, "channel not marked as waiting close")
   256  
   257  	// We'll now generate a single block, wait for the final close status
   258  	// update, then ensure that the closing transaction was included in the
   259  	// block. If there are anchors, we also expect an anchor sweep.
   260  	expectedTxes := 1
   261  	if anchors {
   262  		expectedTxes = 2
   263  	}
   264  
   265  	block := mineBlocks(t, net, 1, expectedTxes)[0]
   266  
   267  	closingTxid, err := net.WaitForChannelClose(closeUpdates)
   268  	require.NoError(t.t, err, "error while waiting for channel close")
   269  
   270  	assertTxInBlock(t, block, closingTxid)
   271  
   272  	// Finally, the channel should no longer be in the waiting close
   273  	// state as we've just mined a block that should include the closing
   274  	// transaction.
   275  	err = wait.Predicate(func() bool {
   276  		pendingChansRequest := &lnrpc.PendingChannelsRequest{}
   277  		pendingChanResp, err := node.PendingChannels(
   278  			ctx, pendingChansRequest,
   279  		)
   280  		if err != nil {
   281  			return false
   282  		}
   283  
   284  		for _, pendingClose := range pendingChanResp.WaitingCloseChannels {
   285  			if pendingClose.Channel.ChannelPoint == chanPointStr {
   286  				return false
   287  			}
   288  		}
   289  
   290  		return true
   291  	}, defaultTimeout)
   292  	require.NoError(
   293  		t.t, err, "closing transaction not marked as fully closed",
   294  	)
   295  
   296  	return closingTxid
   297  }
   298  
   299  // findForceClosedChannel searches a pending channel response for a particular
   300  // channel, returning the force closed channel upon success.
   301  func findForceClosedChannel(pendingChanResp *lnrpc.PendingChannelsResponse,
   302  	op fmt.Stringer) (*lnrpc.PendingChannelsResponse_ForceClosedChannel,
   303  	error) {
   304  
   305  	for _, forceClose := range pendingChanResp.PendingForceClosingChannels {
   306  		if forceClose.Channel.ChannelPoint == op.String() {
   307  			return forceClose, nil
   308  		}
   309  	}
   310  
   311  	return nil, errors.New("channel not marked as force closed")
   312  }
   313  
   314  // findWaitingCloseChannel searches a pending channel response for a particular
   315  // channel, returning the waiting close channel upon success.
   316  func findWaitingCloseChannel(pendingChanResp *lnrpc.PendingChannelsResponse,
   317  	op fmt.Stringer) (*lnrpc.PendingChannelsResponse_WaitingCloseChannel,
   318  	error) {
   319  
   320  	for _, waitingClose := range pendingChanResp.WaitingCloseChannels {
   321  		if waitingClose.Channel.ChannelPoint == op.String() {
   322  			return waitingClose, nil
   323  		}
   324  	}
   325  
   326  	return nil, errors.New("channel not marked as waiting close")
   327  }
   328  
   329  // waitForChannelPendingForceClose waits for the node to report that the
   330  // channel is pending force close, and that the UTXO nursery is aware of it.
   331  func waitForChannelPendingForceClose(node *lntest.HarnessNode,
   332  	fundingChanPoint *lnrpc.ChannelPoint) error {
   333  
   334  	ctxb := context.Background()
   335  	ctx, cancel := context.WithTimeout(ctxb, defaultTimeout)
   336  	defer cancel()
   337  
   338  	txid, err := lnrpc.GetChanPointFundingTxid(fundingChanPoint)
   339  	if err != nil {
   340  		return err
   341  	}
   342  
   343  	op := wire.OutPoint{
   344  		Hash:  *txid,
   345  		Index: fundingChanPoint.OutputIndex,
   346  	}
   347  
   348  	return wait.NoError(func() error {
   349  		pendingChansRequest := &lnrpc.PendingChannelsRequest{}
   350  		pendingChanResp, err := node.PendingChannels(
   351  			ctx, pendingChansRequest,
   352  		)
   353  		if err != nil {
   354  			return fmt.Errorf("unable to get pending channels: %v",
   355  				err)
   356  		}
   357  
   358  		forceClose, err := findForceClosedChannel(pendingChanResp, &op)
   359  		if err != nil {
   360  			return fmt.Errorf("unable to find force-closed "+
   361  				"channel: %v", err)
   362  		}
   363  
   364  		// We must wait until the UTXO nursery has received the channel
   365  		// and is aware of its maturity height.
   366  		if forceClose.MaturityHeight == 0 {
   367  			return fmt.Errorf("channel had maturity height of 0")
   368  		}
   369  
   370  		return nil
   371  	}, defaultTimeout)
   372  }
   373  
   374  // lnrpcForceCloseChannel is a short type alias for a ridiculously long type
   375  // name in the lnrpc package.
   376  type lnrpcForceCloseChannel = lnrpc.PendingChannelsResponse_ForceClosedChannel
   377  
   378  // waitForNumChannelPendingForceClose waits for the node to report a certain
   379  // number of channels in state pending force close.
   380  func waitForNumChannelPendingForceClose(node *lntest.HarnessNode,
   381  	expectedNum int,
   382  	perChanCheck func(channel *lnrpcForceCloseChannel) error) error {
   383  
   384  	ctxb := context.Background()
   385  	ctx, cancel := context.WithTimeout(ctxb, defaultTimeout)
   386  	defer cancel()
   387  
   388  	return wait.NoError(func() error {
   389  		resp, err := node.PendingChannels(
   390  			ctx, &lnrpc.PendingChannelsRequest{},
   391  		)
   392  		if err != nil {
   393  			return fmt.Errorf("unable to get pending channels: %v",
   394  				err)
   395  		}
   396  
   397  		forceCloseChans := resp.PendingForceClosingChannels
   398  		if len(forceCloseChans) != expectedNum {
   399  			return fmt.Errorf("%v should have %d pending "+
   400  				"force close channels but has %d",
   401  				node.Cfg.Name, expectedNum,
   402  				len(forceCloseChans))
   403  		}
   404  
   405  		if perChanCheck != nil {
   406  			for _, forceCloseChan := range forceCloseChans {
   407  				err := perChanCheck(forceCloseChan)
   408  				if err != nil {
   409  					return err
   410  				}
   411  			}
   412  		}
   413  
   414  		return nil
   415  	}, defaultTimeout)
   416  }
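
// Example usage (editorial sketch, not part of the original file): wait for a
// single pending force close on a node and run a per-channel check against
// it, reusing checkPendingChannelNumHtlcs defined later in this file.
//
//	err := waitForNumChannelPendingForceClose(
//		node, 1, func(c *lnrpcForceCloseChannel) error {
//			return checkPendingChannelNumHtlcs(c, 0)
//		},
//	)
//	require.NoError(t.t, err)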
   417  
   418  // cleanupForceClose mines a force close commitment found in the mempool and
   419  // the following sweep transaction from the force closing node.
   420  func cleanupForceClose(t *harnessTest, net *lntest.NetworkHarness,
   421  	node *lntest.HarnessNode, chanPoint *lnrpc.ChannelPoint) {
   422  
   423  	// Wait for the channel to be marked pending force close.
   424  	err := waitForChannelPendingForceClose(node, chanPoint)
   425  	require.NoError(t.t, err, "channel not pending force close")
   426  
   427  	// Mine enough blocks for the node to sweep its funds from the force
   428  	// closed channel.
   429  	//
   430  	// The commit sweeper resolver is able to broadcast the sweep tx up to
   431  	// one block before the CSV elapses, so wait until defaultCSV-1.
   432  	_, err = net.Generate(defaultCSV - 1)
   433  	require.NoError(t.t, err, "unable to generate blocks")
   434  
   435  	// We might find either 1 or 2 sweep txs in the mempool, depending on
   436  	// which nodes were online at the time of the cleanup. If we detect 2
   437  	// txs within the sweeper's batch window (plus one second), we can
   438  	// proceed directly. Otherwise we'll wait for one tx and then proceed.
   439  	_, err = waitForNTxsInMempool(
   440  		net.Miner.Node, 2, sweep.DefaultBatchWindowDuration+time.Second,
   441  	)
   442  	if err != nil {
   443  		_, err = waitForNTxsInMempool(
   444  			net.Miner.Node, 1, minerMempoolTimeout,
   445  		)
   446  		if err != nil {
   447  			t.Fatalf("unable to find a single sweep tx: %v", err)
   448  		}
   449  	}
   450  
   451  	// Mine the sweep tx(s)
   452  	_, err = net.Generate(1)
   453  	if err != nil {
   454  		t.Fatalf("unable to generate blocks: %v", err)
   455  	}
   456  
   457  	// Wait until the channel is no longer marked pendingForceClose. This is
   458  	// only enforced in dcrlnd, not lnd.
   459  	txid, err := lnrpc.GetChanPointFundingTxid(chanPoint)
   460  	if err != nil {
   461  		t.Fatalf("unable to get txid: %v", err)
   462  	}
   463  	chanPointStr := fmt.Sprintf("%v:%v", txid, chanPoint.OutputIndex)
   464  	err = wait.Predicate(func() bool {
   465  		ctxt, cancel := context.WithTimeout(context.Background(), defaultTimeout)
   466  		defer cancel()
   467  		pendingChansRequest := &lnrpc.PendingChannelsRequest{}
   468  		pendingChanResp, err := node.PendingChannels(
   469  			ctxt, pendingChansRequest,
   470  		)
   471  		if err != nil {
   472  			return false
   473  		}
   474  
   475  		var blocksTilMaturity int32 = -1
   476  		for _, pendingClose := range pendingChanResp.PendingForceClosingChannels {
   477  			if pendingClose.Channel.ChannelPoint == chanPointStr {
   478  				for _, htlc := range pendingClose.PendingHtlcs {
   479  					if htlc.BlocksTilMaturity > blocksTilMaturity {
   480  						blocksTilMaturity = htlc.BlocksTilMaturity
   481  					}
   482  				}
   483  
   484  				if blocksTilMaturity < 0 {
   485  					return false
   486  				}
   487  			}
   488  		}
   489  
   490  		// Mine blocks until all HTLC outputs are swept.
   491  		if blocksTilMaturity >= 0 {
   492  			mineBlocks(t, net, uint32(blocksTilMaturity), 0)
   493  			time.Sleep(sweep.DefaultBatchWindowDuration + time.Second)
   494  			mineBlocks(t, net, 1, 0)
   495  			return false
   496  		}
   497  
   498  		return true
   499  	}, time.Second*30)
   500  	if err != nil {
   501  		t.Fatalf("force-closed channel still not cleaned up after timeout: %v", err)
   502  	}
   503  }
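
// Example usage (editorial sketch, not part of the original file): a typical
// force close flow pairs closeChannelAndAssert (force=true) with
// cleanupForceClose so the commitment and the subsequent sweeps are mined
// before the test moves on. chanPoint is assumed to come from an earlier
// openChannelAndAssert call.
//
//	closeChannelAndAssert(t, net, net.Alice, chanPoint, true)
//	cleanupForceClose(t, net, net.Alice, chanPoint)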
   504  
   505  // numOpenChannelsPending sends an RPC request to a node to get a count of the
   506  // node's channels that are currently in a pending state (with a broadcast, but
   507  // not confirmed funding transaction).
   508  func numOpenChannelsPending(ctxt context.Context,
   509  	node *lntest.HarnessNode) (int, error) {
   510  
   511  	pendingChansRequest := &lnrpc.PendingChannelsRequest{}
   512  	resp, err := node.PendingChannels(ctxt, pendingChansRequest)
   513  	if err != nil {
   514  		return 0, err
   515  	}
   516  	return len(resp.PendingOpenChannels), nil
   517  }
   518  
   519  // assertNumOpenChannelsPending asserts that a pair of nodes have the expected
   520  // number of pending channels between them.
   521  func assertNumOpenChannelsPending(t *harnessTest,
   522  	alice, bob *lntest.HarnessNode, expected int) {
   523  
   524  	ctxb := context.Background()
   525  	ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
   526  	defer cancel()
   527  
   528  	err := wait.NoError(func() error {
   529  		aliceNumChans, err := numOpenChannelsPending(ctxt, alice)
   530  		if err != nil {
   531  			return fmt.Errorf("error fetching alice's node (%v) "+
   532  				"pending channels %v", alice.NodeID, err)
   533  		}
   534  		bobNumChans, err := numOpenChannelsPending(ctxt, bob)
   535  		if err != nil {
   536  			return fmt.Errorf("error fetching bob's node (%v) "+
   537  				"pending channels %v", bob.NodeID, err)
   538  		}
   539  
   540  		aliceStateCorrect := aliceNumChans == expected
   541  		if !aliceStateCorrect {
   542  			return fmt.Errorf("number of pending channels for "+
   543  				"alice incorrect. expected %v, got %v",
   544  				expected, aliceNumChans)
   545  		}
   546  
   547  		bobStateCorrect := bobNumChans == expected
   548  		if !bobStateCorrect {
   549  			return fmt.Errorf("number of pending channels for bob "+
   550  				"incorrect. expected %v, got %v", expected,
   551  				bobNumChans)
   552  		}
   553  
   554  		return nil
   555  	}, defaultTimeout)
   556  	require.NoError(t.t, err)
   557  }
   558  
   559  // checkPeerInPeersList returns true if Bob appears in Alice's peer list.
   560  func checkPeerInPeersList(ctx context.Context, alice,
   561  	bob *lntest.HarnessNode) (bool, error) {
   562  
   563  	peers, err := alice.ListPeers(ctx, &lnrpc.ListPeersRequest{})
   564  	if err != nil {
   565  		return false, fmt.Errorf(
   566  			"error listing %s's node (%v) peers: %v",
   567  			alice.Name(), alice.NodeID, err,
   568  		)
   569  	}
   570  
   571  	for _, peer := range peers.Peers {
   572  		if peer.PubKey == bob.PubKeyStr {
   573  			return true, nil
   574  		}
   575  	}
   576  
   577  	return false, nil
   578  }
   579  
   580  // assertConnected asserts that two peers are connected.
   581  func assertConnected(t *harnessTest, alice, bob *lntest.HarnessNode) {
   582  	ctxb := context.Background()
   583  	ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
   584  	defer cancel()
   585  
   586  	err := wait.NoError(func() error {
   587  		bobIsAlicePeer, err := checkPeerInPeersList(ctxt, alice, bob)
   588  		if err != nil {
   589  			return err
   590  		}
   591  
   592  		if !bobIsAlicePeer {
   593  			return fmt.Errorf(
   594  				"expected %s and %s to be connected "+
   595  					"but %s is not in %s's peer list",
   596  				alice.Name(), bob.Name(),
   597  				bob.Name(), alice.Name(),
   598  			)
   599  		}
   600  
   601  		aliceIsBobPeer, err := checkPeerInPeersList(ctxt, bob, alice)
   602  		if err != nil {
   603  			return err
   604  		}
   605  
   606  		if !aliceIsBobPeer {
   607  			return fmt.Errorf(
   608  				"expected %s and %s to be connected "+
   609  					"but %s is not in %s's peer list",
   610  				alice.Name(), bob.Name(),
   611  				alice.Name(), bob.Name(),
   612  			)
   613  		}
   614  
   615  		return nil
   616  
   617  	}, defaultTimeout)
   618  	require.NoError(t.t, err)
   619  }
   620  
   621  // assertNotConnected asserts that two peers are not connected.
   622  func assertNotConnected(t *harnessTest, alice, bob *lntest.HarnessNode) {
   623  	ctxb := context.Background()
   624  	ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
   625  	defer cancel()
   626  
   627  	err := wait.NoError(func() error {
   628  		bobIsAlicePeer, err := checkPeerInPeersList(ctxt, alice, bob)
   629  		if err != nil {
   630  			return err
   631  		}
   632  
   633  		if bobIsAlicePeer {
   634  			return fmt.Errorf(
   635  				"expected %s and %s not to be "+
   636  					"connected but %s is in %s's "+
   637  					"peer list",
   638  				alice.Name(), bob.Name(),
   639  				bob.Name(), alice.Name(),
   640  			)
   641  		}
   642  
   643  		aliceIsBobPeer, err := checkPeerInPeersList(ctxt, bob, alice)
   644  		if err != nil {
   645  			return err
   646  		}
   647  
   648  		if aliceIsBobPeer {
   649  			return fmt.Errorf(
   650  				"expected %s and %s not to be "+
   651  					"connected but %s is in %s's "+
   652  					"peer list",
   653  				alice.Name(), bob.Name(),
   654  				alice.Name(), bob.Name(),
   655  			)
   656  		}
   657  
   658  		return nil
   659  
   660  	}, defaultTimeout)
   661  	require.NoError(t.t, err)
   662  }
   663  
   664  // shutdownAndAssert shuts down the given node and asserts that no errors
   665  // occur.
   666  func shutdownAndAssert(net *lntest.NetworkHarness, t *harnessTest,
   667  	node *lntest.HarnessNode) {
   668  
   669  	// The process may not be in a state to shut down immediately, so
   670  	// we'll retry up to a hard limit to ensure we eventually shut down.
   671  	err := wait.NoError(func() error {
   672  		return net.ShutdownNode(node)
   673  	}, defaultTimeout)
   674  	require.NoErrorf(t.t, err, "unable to shutdown %v", node.Name())
   675  }
   676  
   677  // assertChannelBalanceResp makes a ChannelBalance request and checks the
   678  // returned response matches the expected.
   679  func assertChannelBalanceResp(t *harnessTest,
   680  	node *lntest.HarnessNode, expected *lnrpc.ChannelBalanceResponse) {
   681  
   682  	resp := getChannelBalance(t, node)
   683  
   684  	// Copy the max inbound/outbound amounts from the response into the
   685  	// expected value so they don't affect the comparison. They're dcrlnd
   686  	// specific fields with a dedicated test of their own.
   687  	//
   688  	// In the future, tests that use this helper could be adjusted to also
   689  	// assert the correct values for these fields.
   690  	expected.MaxInboundAmount = resp.MaxInboundAmount
   691  	expected.MaxOutboundAmount = resp.MaxOutboundAmount
   692  
   693  	require.True(t.t, proto.Equal(expected, resp), "balance is incorrect "+
   694  		"got: %v, want: %v", resp, expected)
   695  }
   696  
   697  // getChannelBalance gets the channel balance.
   698  func getChannelBalance(t *harnessTest,
   699  	node *lntest.HarnessNode) *lnrpc.ChannelBalanceResponse {
   700  
   701  	t.t.Helper()
   702  
   703  	ctxt, _ := context.WithTimeout(context.Background(), defaultTimeout)
   704  	req := &lnrpc.ChannelBalanceRequest{}
   705  	resp, err := node.ChannelBalance(ctxt, req)
   706  
   707  	require.NoError(t.t, err, "unable to get node's balance")
   708  	return resp
   709  }
   710  
   711  // txStr returns the string representation of the channel's funding outpoint.
   712  func txStr(chanPoint *lnrpc.ChannelPoint) string {
   713  	fundingTxID, err := lnrpc.GetChanPointFundingTxid(chanPoint)
   714  	if err != nil {
   715  		return ""
   716  	}
   717  	cp := wire.OutPoint{
   718  		Hash:  *fundingTxID,
   719  		Index: chanPoint.OutputIndex,
   720  	}
   721  	return cp.String()
   722  }
   723  
   724  // getChannelPolicies queries the channel graph and retrieves the current edge
   725  // policies for the provided channel points.
   726  func getChannelPolicies(t *harnessTest, node *lntest.HarnessNode,
   727  	advertisingNode string,
   728  	chanPoints ...*lnrpc.ChannelPoint) []*lnrpc.RoutingPolicy {
   729  
   730  	ctxb := context.Background()
   731  
   732  	descReq := &lnrpc.ChannelGraphRequest{
   733  		IncludeUnannounced: true,
   734  	}
   735  	ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
   736  	chanGraph, err := node.DescribeGraph(ctxt, descReq)
   737  	require.NoError(t.t, err, "unable to query for alice's graph")
   738  
   739  	var policies []*lnrpc.RoutingPolicy
   740  	err = wait.NoError(func() error {
   741  	out:
   742  		for _, chanPoint := range chanPoints {
   743  			for _, e := range chanGraph.Edges {
   744  				if e.ChanPoint != txStr(chanPoint) {
   745  					continue
   746  				}
   747  
   748  				if e.Node1Pub == advertisingNode {
   749  					policies = append(policies,
   750  						e.Node1Policy)
   751  				} else {
   752  					policies = append(policies,
   753  						e.Node2Policy)
   754  				}
   755  
   756  				continue out
   757  			}
   758  
   759  			// If we've iterated over all the known edges and we weren't
   760  			// able to find this specific one, then we'll fail.
   761  			return fmt.Errorf("did not find edge %v", txStr(chanPoint))
   762  		}
   763  
   764  		return nil
   765  	}, defaultTimeout)
   766  	require.NoError(t.t, err)
   767  
   768  	return policies
   769  }
   770  
   771  // assertChannelPolicy asserts that the passed node's known channel policy for
   772  // the passed chanPoint is consistent with the expected policy values.
   773  func assertChannelPolicy(t *harnessTest, node *lntest.HarnessNode,
   774  	advertisingNode string, expectedPolicy *lnrpc.RoutingPolicy,
   775  	chanPoints ...*lnrpc.ChannelPoint) {
   776  
   777  	policies := getChannelPolicies(t, node, advertisingNode, chanPoints...)
   778  	for _, policy := range policies {
   779  		err := lntest.CheckChannelPolicy(policy, expectedPolicy)
   780  		if err != nil {
   781  			t.Fatalf("%v: %s", err, node)
   782  		}
   783  	}
   784  }
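
// Example usage (editorial sketch, not part of the original file): assert
// that Bob's view of Alice's advertised policy matches an expected policy for
// a previously opened chanPoint. The fields shown are illustrative; populate
// whichever lnrpc.RoutingPolicy fields the test actually cares about.
//
//	expectedPolicy := &lnrpc.RoutingPolicy{
//		TimeLockDelta: 40,
//		Disabled:      false,
//	}
//	assertChannelPolicy(
//		t, net.Bob, net.Alice.PubKeyStr, expectedPolicy, chanPoint,
//	)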
   785  
   786  // assertMinerBlockHeightDelta ensures that tempMiner is 'delta' blocks ahead
   787  // of miner.
   788  func assertMinerBlockHeightDelta(t *harnessTest,
   789  	miner, tempMiner *lntest.HarnessMiner, delta int64) {
   790  
   791  	// Ensure the chain lengths are what we expect.
   792  	var predErr error
   793  	err := wait.Predicate(func() bool {
   794  		_, tempMinerHeight, err := tempMiner.Node.GetBestBlock(context.Background())
   795  		if err != nil {
   796  			predErr = fmt.Errorf("unable to get current "+
   797  				"blockheight %v", err)
   798  			return false
   799  		}
   800  
   801  		_, minerHeight, err := miner.Node.GetBestBlock(context.Background())
   802  		if err != nil {
   803  			predErr = fmt.Errorf("unable to get current "+
   804  				"blockheight %v", err)
   805  			return false
   806  		}
   807  
   808  		if tempMinerHeight != minerHeight+delta {
   809  			predErr = fmt.Errorf("expected new miner(%d) to be %d "+
   810  				"blocks ahead of original miner(%d)",
   811  				tempMinerHeight, delta, minerHeight)
   812  			return false
   813  		}
   814  		return true
   815  	}, defaultTimeout)
   816  	if err != nil {
   817  		t.Fatalf("%v", predErr)
   818  	}
   819  }
   820  
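// checkCommitmentMaturity verifies that a force closed channel's commitment
// output reports the expected maturity height and number of blocks until
// maturity.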
   821  func checkCommitmentMaturity(
   822  	forceClose *lnrpc.PendingChannelsResponse_ForceClosedChannel,
   823  	maturityHeight uint32, blocksTilMaturity int32) error {
   824  
   825  	if forceClose.MaturityHeight != maturityHeight {
   826  		return fmt.Errorf("expected commitment maturity height to be "+
   827  			"%d, found %d instead", maturityHeight,
   828  			forceClose.MaturityHeight)
   829  	}
   830  	if forceClose.BlocksTilMaturity != blocksTilMaturity {
   831  		return fmt.Errorf("expected commitment blocks til maturity to "+
   832  			"be %d, found %d instead", blocksTilMaturity,
   833  			forceClose.BlocksTilMaturity)
   834  	}
   835  
   836  	return nil
   837  }
   838  
   839  // checkPendingChannelNumHtlcs verifies that a force closed channel has the
   840  // proper number of pending htlcs.
   841  func checkPendingChannelNumHtlcs(
   842  	forceClose *lnrpc.PendingChannelsResponse_ForceClosedChannel,
   843  	expectedNumHtlcs int) error {
   844  
   845  	if len(forceClose.PendingHtlcs) != expectedNumHtlcs {
   846  		return fmt.Errorf("expected force closed channel to have %d "+
   847  			"pending htlcs, found %d instead", expectedNumHtlcs,
   848  			len(forceClose.PendingHtlcs))
   849  	}
   850  
   851  	return nil
   852  }
   853  
   854  // checkNumForceClosedChannels checks that a pending channel response has the
   855  // expected number of force closed channels.
   856  func checkNumForceClosedChannels(pendingChanResp *lnrpc.PendingChannelsResponse,
   857  	expectedNumChans int) error {
   858  
   859  	if len(pendingChanResp.PendingForceClosingChannels) != expectedNumChans {
   860  		return fmt.Errorf("expected to find %d force closed channels, "+
   861  			"got %d", expectedNumChans,
   862  			len(pendingChanResp.PendingForceClosingChannels))
   863  	}
   864  
   865  	return nil
   866  }
   867  
   868  // checkNumWaitingCloseChannels checks that a pending channel response has the
   869  // expected number of channels waiting for closing tx to confirm.
   870  func checkNumWaitingCloseChannels(pendingChanResp *lnrpc.PendingChannelsResponse,
   871  	expectedNumChans int) error {
   872  
   873  	if len(pendingChanResp.WaitingCloseChannels) != expectedNumChans {
   874  		return fmt.Errorf("expected to find %d channels waiting "+
   875  			"closure, got %d", expectedNumChans,
   876  			len(pendingChanResp.WaitingCloseChannels))
   877  	}
   878  
   879  	return nil
   880  }
   881  
   882  // checkPendingHtlcStageAndMaturity uniformly tests all pending HTLCs belonging
   883  // to a force closed channel, testing for the expected stage number, blocks till
   884  // maturity, and the maturity height.
   885  func checkPendingHtlcStageAndMaturity(
   886  	forceClose *lnrpc.PendingChannelsResponse_ForceClosedChannel,
   887  	stage, maturityHeight uint32, blocksTillMaturity int32) error {
   888  
   889  	for _, pendingHtlc := range forceClose.PendingHtlcs {
   890  		if pendingHtlc.Stage != stage {
   891  			return fmt.Errorf("expected pending htlc to be stage "+
   892  				"%d, found %d", stage, pendingHtlc.Stage)
   893  		}
   894  		if pendingHtlc.MaturityHeight != maturityHeight {
   895  			return fmt.Errorf("expected pending htlc maturity "+
   896  				"height to be %d, instead has %d",
   897  				maturityHeight, pendingHtlc.MaturityHeight)
   898  		}
   899  		if pendingHtlc.BlocksTilMaturity != blocksTillMaturity {
   900  			return fmt.Errorf("expected pending htlc blocks til "+
   901  				"maturity to be %d, instead has %d",
   902  				blocksTillMaturity,
   903  				pendingHtlc.BlocksTilMaturity)
   904  		}
   905  	}
   906  
   907  	return nil
   908  }
   909  
   910  // assertReports checks that the count of resolutions we have present per
   911  // type matches a set of expected resolutions.
   912  func assertReports(t *harnessTest, node *lntest.HarnessNode,
   913  	channelPoint wire.OutPoint, expected map[string]*lnrpc.Resolution) {
   914  
   915  	// Get our node's closed channels.
   916  	ctxb := context.Background()
   917  	ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
   918  	defer cancel()
   919  
   920  	closed, err := node.ClosedChannels(
   921  		ctxt, &lnrpc.ClosedChannelsRequest{},
   922  	)
   923  	require.NoError(t.t, err)
   924  
   925  	var resolutions []*lnrpc.Resolution
   926  	for _, close := range closed.Channels {
   927  		if close.ChannelPoint == channelPoint.String() {
   928  			resolutions = close.Resolutions
   929  			break
   930  		}
   931  	}
   932  
   933  	require.NotNil(t.t, resolutions)
   934  	require.Equal(t.t, len(expected), len(resolutions))
   935  
   936  	for _, res := range resolutions {
   937  		outPointStr := fmt.Sprintf("%v:%v", res.Outpoint.TxidStr,
   938  			res.Outpoint.OutputIndex)
   939  
   940  		expected, ok := expected[outPointStr]
   941  		require.True(t.t, ok)
   942  		require.Equal(t.t, expected, res)
   943  	}
   944  }
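
// Example usage (editorial sketch, not part of the original file): check the
// resolutions reported for a force-closed channel. The map keys are
// "txid:output_index" outpoint strings; anchorOutpoint/commitOutpoint and the
// lnrpc.Resolution bodies are placeholders for whatever the test expects.
//
//	expectedReports := map[string]*lnrpc.Resolution{
//		anchorOutpoint.String(): { /* expected anchor resolution */ },
//		commitOutpoint.String(): { /* expected commit resolution */ },
//	}
//	assertReports(t, node, channelPoint, expectedReports)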
   945  
   946  // assertSweepFound looks up a sweep in a node's list of broadcast sweeps.
   947  func assertSweepFound(t *testing.T, node *lntest.HarnessNode,
   948  	sweep string, verbose bool) {
   949  
   950  	// List all sweeps that the node has broadcast.
   951  	ctxb := context.Background()
   952  	ctx, cancel := context.WithTimeout(ctxb, defaultTimeout)
   953  	defer cancel()
   954  	sweepResp, err := node.WalletKitClient.ListSweeps(
   955  		ctx, &walletrpc.ListSweepsRequest{
   956  			Verbose: verbose,
   957  		},
   958  	)
   959  	require.NoError(t, err)
   960  
   961  	var found bool
   962  	if verbose {
   963  		found = findSweepInDetails(t, sweep, sweepResp)
   964  	} else {
   965  		found = findSweepInTxids(t, sweep, sweepResp)
   966  	}
   967  
   968  	require.True(t, found, "sweep: %v not found", sweep)
   969  }
   970  
   971  func findSweepInTxids(t *testing.T, sweepTxid string,
   972  	sweepResp *walletrpc.ListSweepsResponse) bool {
   973  
   974  	sweepTxIDs := sweepResp.GetTransactionIds()
   975  	require.NotNil(t, sweepTxIDs, "expected transaction ids")
   976  	require.Nil(t, sweepResp.GetTransactionDetails())
   977  
   978  	// Check that the sweep tx we have just produced is present.
   979  	for _, tx := range sweepTxIDs.TransactionIds {
   980  		if tx == sweepTxid {
   981  			return true
   982  		}
   983  	}
   984  
   985  	return false
   986  }
   987  
   988  func findSweepInDetails(t *testing.T, sweepTxid string,
   989  	sweepResp *walletrpc.ListSweepsResponse) bool {
   990  
   991  	sweepDetails := sweepResp.GetTransactionDetails()
   992  	require.NotNil(t, sweepDetails, "expected transaction details")
   993  	require.Nil(t, sweepResp.GetTransactionIds())
   994  
   995  	for _, tx := range sweepDetails.Transactions {
   996  		if tx.TxHash == sweepTxid {
   997  			return true
   998  		}
   999  	}
  1000  
  1001  	return false
  1002  }
  1003  
  1004  // assertAmountSent generates a closure which queries listchannels for sndr and
  1005  // rcvr, and asserts that sndr sent amt atoms, and that rcvr received amt
  1006  // atoms.
  1007  //
  1008  // NOTE: This method assumes that each node only has one channel, and it is the
  1009  // channel used to send the payment.
  1010  func assertAmountSent(amt dcrutil.Amount, sndr, rcvr *lntest.HarnessNode) func() error {
  1011  	return func() error {
  1012  		// Both channels should also have properly accounted for the
  1013  		// amount that has been sent/received over the channel.
  1014  		listReq := &lnrpc.ListChannelsRequest{}
  1015  		ctxb := context.Background()
  1016  		ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
  1017  		sndrListChannels, err := sndr.ListChannels(ctxt, listReq)
  1018  		if err != nil {
  1019  			return fmt.Errorf("unable to query for %s's channel "+
  1020  				"list: %v", sndr.Name(), err)
  1021  		}
  1022  		sndrSatoshisSent := sndrListChannels.Channels[0].TotalAtomsSent
  1023  		if sndrSatoshisSent != int64(amt) {
  1024  			return fmt.Errorf("%s's atoms sent is incorrect "+
  1025  				"got %v, expected %v", sndr.Name(),
  1026  				sndrSatoshisSent, amt)
  1027  		}
  1028  
  1029  		ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
  1030  		rcvrListChannels, err := rcvr.ListChannels(ctxt, listReq)
  1031  		if err != nil {
  1032  			return fmt.Errorf("unable to query for %s's channel "+
  1033  				"list: %v", rcvr.Name(), err)
  1034  		}
  1035  		rcvrSatoshisReceived := rcvrListChannels.Channels[0].TotalAtomsReceived
  1036  		if rcvrSatoshisReceived != int64(amt) {
  1037  			return fmt.Errorf("%s's atoms received is "+
  1038  				"incorrect got %v, expected %v", rcvr.Name(),
  1039  				rcvrSatoshisReceived, amt)
  1040  		}
  1041  
  1042  		return nil
  1043  	}
  1044  }
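
// Example usage (editorial sketch, not part of the original file): since
// assertAmountSent returns a closure, it composes naturally with wait.NoError
// so the check is retried until the settled amount shows up in both nodes'
// listchannels responses. paymentAmt is a placeholder dcrutil.Amount.
//
//	err := wait.NoError(
//		assertAmountSent(paymentAmt, net.Alice, net.Bob), defaultTimeout,
//	)
//	require.NoError(t.t, err)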
  1045  
  1046  // assertLastHTLCError checks that the last sent HTLC of the last payment sent
  1047  // by the given node failed with the expected failure code.
  1048  func assertLastHTLCError(t *harnessTest, node *lntest.HarnessNode,
  1049  	code lnrpc.Failure_FailureCode) {
  1050  
  1051  	req := &lnrpc.ListPaymentsRequest{
  1052  		IncludeIncomplete: true,
  1053  	}
  1054  	ctxt, _ := context.WithTimeout(context.Background(), defaultTimeout)
  1055  	paymentsResp, err := node.ListPayments(ctxt, req)
  1056  	require.NoError(t.t, err, "error when obtaining payments")
  1057  
  1058  	payments := paymentsResp.Payments
  1059  	require.NotZero(t.t, len(payments), "no payments found")
  1060  
  1061  	payment := payments[len(payments)-1]
  1062  	htlcs := payment.Htlcs
  1063  	require.NotZero(t.t, len(htlcs), "no htlcs")
  1064  
  1065  	htlc := htlcs[len(htlcs)-1]
  1066  	require.NotNil(t.t, htlc.Failure, "expected failure")
  1067  
  1068  	require.Equal(t.t, code, htlc.Failure.Code, "unexpected failure code")
  1069  }
  1070  
  1071  func assertChannelConstraintsEqual(
  1072  	t *harnessTest, want, got *lnrpc.ChannelConstraints) {
  1073  
  1074  	t.t.Helper()
  1075  
  1076  	require.Equal(t.t, want.CsvDelay, got.CsvDelay, "CsvDelay mismatched")
  1077  	require.Equal(
  1078  		t.t, want.ChanReserveAtoms, got.ChanReserveAtoms,
  1079  		"ChanReserveAtoms mismatched",
  1080  	)
  1081  	require.Equal(
  1082  		t.t, want.DustLimitAtoms, got.DustLimitAtoms,
  1083  		"DustLimitAtoms mismatched",
  1084  	)
  1085  	require.Equal(
  1086  		t.t, want.MaxPendingAmtMAtoms, got.MaxPendingAmtMAtoms,
  1087  		"MaxPendingAmtMAtoms mismatched",
  1088  	)
  1089  	require.Equal(
  1090  		t.t, want.MinHtlcMAtoms, got.MinHtlcMAtoms,
  1091  		"MinHtlcMAtoms mismatched",
  1092  	)
  1093  	require.Equal(
  1094  		t.t, want.MaxAcceptedHtlcs, got.MaxAcceptedHtlcs,
  1095  		"MaxAcceptedHtlcs mismatched",
  1096  	)
  1097  }
  1098  
  1099  // assertAmountPaid checks that the ListChannels command of the provided
  1100  // node lists the total amount sent and received as expected for the
  1101  // provided channel.
  1102  func assertAmountPaid(t *harnessTest, channelName string,
  1103  	node *lntest.HarnessNode, chanPoint wire.OutPoint, amountSent,
  1104  	amountReceived int64) {
  1105  	ctxb := context.Background()
  1106  
  1107  	checkAmountPaid := func() error {
  1108  		listReq := &lnrpc.ListChannelsRequest{}
  1109  		ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
  1110  		resp, err := node.ListChannels(ctxt, listReq)
  1111  		if err != nil {
  1112  			return fmt.Errorf("unable to query for node's "+
  1113  				"channels: %v", err)
  1114  		}
  1115  		for _, channel := range resp.Channels {
  1116  			if channel.ChannelPoint != chanPoint.String() {
  1117  				continue
  1118  			}
  1119  
  1120  			if channel.TotalAtomsSent != amountSent {
  1121  				return fmt.Errorf("%v: incorrect amount"+
  1122  					" sent: %v != %v", channelName,
  1123  					channel.TotalAtomsSent,
  1124  					amountSent)
  1125  			}
  1126  			if channel.TotalAtomsReceived !=
  1127  				amountReceived {
  1128  				return fmt.Errorf("%v: incorrect amount"+
  1129  					" received: %v != %v",
  1130  					channelName,
  1131  					channel.TotalAtomsReceived,
  1132  					amountReceived)
  1133  			}
  1134  
  1135  			return nil
  1136  		}
  1137  		return fmt.Errorf("channel not found")
  1138  	}
  1139  
  1140  	// Since HTLC inclusion in the commitment transaction might be
  1141  	// postponed, we check the balance a few times and only return an
  1142  	// error if the balance is still wrong after the timeout set up
  1143  	// below has elapsed.
  1144  	// TODO(roasbeef): remove sleep after invoice notification hooks
  1145  	// are in place
  1146  	var timeover uint32
  1147  	go func() {
  1148  		<-time.After(defaultTimeout)
  1149  		atomic.StoreUint32(&timeover, 1)
  1150  	}()
  1151  
  1152  	for {
  1153  		isTimeover := atomic.LoadUint32(&timeover) == 1
  1154  		if err := checkAmountPaid(); err != nil {
  1155  			require.Falsef(
  1156  				t.t, isTimeover,
  1157  				"Check amount Paid failed: %v", err,
  1158  			)
  1159  		} else {
  1160  			break
  1161  		}
  1162  	}
  1163  }
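
// Example usage (editorial sketch, not part of the original file): after
// Alice pays Bob paymentAmt over the channel identified by aliceChanPoint (a
// wire.OutPoint), each side's ledger is checked from its own perspective,
// sent on Alice's side and received on Bob's.
//
//	assertAmountPaid(t, "Alice(local) => Bob(remote)", net.Alice,
//		aliceChanPoint, paymentAmt, 0)
//	assertAmountPaid(t, "Alice(local) => Bob(remote)", net.Bob,
//		aliceChanPoint, 0, paymentAmt)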
  1164  
  1165  // assertNumPendingChannels checks that a PendingChannels response from the
  1166  // node reports the expected number of pending channels.
  1167  func assertNumPendingChannels(t *harnessTest, node *lntest.HarnessNode,
  1168  	expWaitingClose, expPendingForceClose, expPendingClosing,
  1169  	expPendingOpen int) {
  1170  
  1171  	ctxb := context.Background()
  1172  
  1173  	var predErr error
  1174  	err := wait.Predicate(func() bool {
  1175  		pendingChansRequest := &lnrpc.PendingChannelsRequest{}
  1176  		ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
  1177  		pendingChanResp, err := node.PendingChannels(ctxt,
  1178  			pendingChansRequest)
  1179  		if err != nil {
  1180  			predErr = fmt.Errorf("unable to query for pending "+
  1181  				"channels: %v", err)
  1182  			return false
  1183  		}
  1184  		n := len(pendingChanResp.WaitingCloseChannels)
  1185  		if n != expWaitingClose {
  1186  			predErr = fmt.Errorf("expected to find %d channels "+
  1187  				"waiting close, found %d", expWaitingClose, n)
  1188  			return false
  1189  		}
  1190  		n = len(pendingChanResp.PendingForceClosingChannels)
  1191  		if n != expPendingForceClose {
  1192  			predErr = fmt.Errorf("expected to find %d channel "+
  1193  				"pending force close, found %d", expPendingForceClose, n)
  1194  			return false
  1195  		}
  1196  
  1197  		n = len(pendingChanResp.PendingClosingChannels)
  1198  		if n != expPendingClosing {
  1199  			predErr = fmt.Errorf("expected to find %d channels "+
  1200  				"pending closing, found %d", expPendingClosing, n)
  1201  			return false
  1202  		}
  1203  
  1204  		n = len(pendingChanResp.PendingOpenChannels)
  1205  		if n != expPendingOpen {
  1206  			predErr = fmt.Errorf("expected to find %d channels pending open, found %d", expPendingOpen, n)
  1207  			return false
  1208  		}
  1209  		return true
  1210  	}, defaultTimeout)
  1211  	require.NoErrorf(t.t, err, "got err: %v", predErr)
  1212  }
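
// Example usage (editorial sketch, not part of the original file): right
// after a node broadcasts a force close, its channel should be counted as
// "waiting close" only. The four counters are, in order: waiting close,
// pending force close, pending closing and pending open.
//
//	assertNumPendingChannels(t, carol, 1, 0, 0, 0)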
  1213  
  1214  // assertDLPExecuted asserts that Dave is a node that has recovered his state
  1215  // from scratch. Carol should then force close on chain, with Dave sweeping
  1216  // his funds immediately, and Carol sweeping her funds after her CSV delay is
  1217  // up. The exact sweep behavior depends on the channel's commitment type, as
  1218  // anchor and script-enforced-lease channels add extra transactions and delays.
  1219  func assertDLPExecuted(net *lntest.NetworkHarness, t *harnessTest,
  1220  	carol *lntest.HarnessNode, carolStartingBalance int64,
  1221  	dave *lntest.HarnessNode, daveStartingBalance int64,
  1222  	commitType lnrpc.CommitmentType) {
  1223  
  1224  	// Increase the fee estimate so that the following force close tx will
  1225  	// be cpfp'ed.
  1226  	net.SetFeeEstimate(30000)
  1227  
  1228  	// We disabled auto-reconnect for some tests to avoid timing issues.
  1229  	// To make sure the nodes are initiating DLP now, we have to manually
  1230  	// re-connect them.
  1231  	ctxb := context.Background()
  1232  	net.EnsureConnected(t.t, carol, dave)
  1233  
  1234  	// Upon reconnection, the nodes should detect that Dave is out of sync.
  1235  	// Carol should force close the channel using her latest commitment.
  1236  	expectedTxes := 1
  1237  	if commitTypeHasAnchors(commitType) {
  1238  		// The two expected transactions for Carol are the force close
  1239  		// transaction and the anchor sweep transaction.
  1240  		expectedTxes = 2
  1241  	}
  1242  	_, err := waitForNTxsInMempool(
  1243  		net.Miner.Node, expectedTxes, minerMempoolTimeout,
  1244  	)
  1245  	require.NoError(
  1246  		t.t, err,
  1247  		"unable to find Carol's force close tx in mempool",
  1248  	)
  1249  
  1250  	// Channel should be in the state "waiting close" for Carol since she
  1251  	// broadcasted the force close tx.
  1252  	assertNumPendingChannels(t, carol, 1, 0, 0, 0)
  1253  
  1254  	// Dave should also consider the channel "waiting close", as he noticed
  1255  	// the channel was out of sync, and is now waiting for a force close to
  1256  	// hit the chain.
  1257  	assertNumPendingChannels(t, dave, 1, 0, 0, 0)
  1258  
  1259  	// Restart Dave to make sure he is able to sweep the funds after
  1260  	// shutdown.
  1261  	require.NoError(t.t, net.RestartNode(dave, nil), "Node restart failed")
  1262  
  1263  	// Generate a single block, which should confirm the closing tx.
  1264  	_ = mineBlocks(t, net, 1, expectedTxes)[0]
  1265  
  1266  	// Dave should consider the channel pending force close (since he is
  1267  	// waiting for his sweep to confirm).
  1268  	assertNumPendingChannels(t, dave, 0, 1, 0, 0)
  1269  
  1270  	// Carol is considering it "pending force close", as we must wait
  1271  	// before she can sweep her outputs.
  1272  	assertNumPendingChannels(t, carol, 0, 1, 0, 0)
  1273  
  1274  	if commitType == lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE {
  1275  		// Dave should sweep his anchor only, since he still has the
  1276  		// lease CLTV constraint on his commitment output.
  1277  		_, err = waitForNTxsInMempool(
  1278  			net.Miner.Node, 1, minerMempoolTimeout,
  1279  		)
  1280  		require.NoError(t.t, err, "unable to find Dave's anchor sweep "+
  1281  			"tx in mempool")
  1282  
  1283  		// Mine Dave's anchor sweep tx.
  1284  		_ = mineBlocks(t, net, 1, 1)[0]
  1285  
  1286  		// After Carol's output matures, she should also reclaim her
  1287  		// funds.
  1288  		//
  1289  		// The commit sweep resolver publishes the sweep tx at
  1290  		// defaultCSV-1 and we already mined one block after the
  1291  		// commitment was published, so take that into account.
  1292  		mineBlocks(t, net, defaultCSV-1-1, 0)
  1293  		carolSweep, err := waitForTxInMempool(
  1294  			net.Miner.Node, minerMempoolTimeout,
  1295  		)
  1296  		require.NoError(t.t, err, "unable to find Carol's sweep tx in "+
  1297  			"mempool")
  1298  		block := mineBlocks(t, net, 1, 1)[0]
  1299  		assertTxInBlock(t, block, carolSweep)
  1300  
  1301  		// Now the channel should be fully closed also from Carol's POV.
  1302  		assertNumPendingChannels(t, carol, 0, 0, 0, 0)
  1303  
  1304  		// We'll now mine the remaining blocks to prompt Dave to sweep
  1305  		// his CLTV-constrained output.
  1306  		ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
  1307  		defer cancel()
  1308  		resp, err := dave.PendingChannels(
  1309  			ctxt, &lnrpc.PendingChannelsRequest{},
  1310  		)
  1311  		require.NoError(t.t, err)
  1312  		blocksTilMaturity :=
  1313  			resp.PendingForceClosingChannels[0].BlocksTilMaturity
  1314  		require.Positive(t.t, blocksTilMaturity)
  1315  
  1316  		mineBlocks(t, net, uint32(blocksTilMaturity), 0)
  1317  		daveSweep, err := waitForTxInMempool(
  1318  			net.Miner.Node, minerMempoolTimeout,
  1319  		)
  1320  		require.NoError(t.t, err, "unable to find Dave's sweep tx in "+
  1321  			"mempool")
  1322  		block = mineBlocks(t, net, 1, 1)[0]
  1323  		assertTxInBlock(t, block, daveSweep)
  1324  
  1325  		// Now Dave should consider the channel fully closed.
  1326  		assertNumPendingChannels(t, dave, 0, 0, 0, 0)
  1327  	} else {
  1328  		// Dave should sweep his funds immediately, as they are not
  1329  		// timelocked. We also expect Dave to sweep his anchor, if
  1330  		// present.
  1331  		_, err = waitForNTxsInMempool(
  1332  			net.Miner.Node, expectedTxes, minerMempoolTimeout,
  1333  		)
  1334  		require.NoError(t.t, err, "unable to find Dave's sweep tx in "+
  1335  			"mempool")
  1336  
  1337  		// Mine the sweep tx.
  1338  		_ = mineBlocks(t, net, 1, expectedTxes)[0]
  1339  
  1340  		// Now Dave should consider the channel fully closed.
  1341  		assertNumPendingChannels(t, dave, 0, 0, 0, 0)
  1342  
  1343  		// After Carol's output matures, she should also reclaim her
  1344  		// funds.
  1345  		//
  1346  		// The commit sweep resolver publishes the sweep tx at
  1347  		// defaultCSV-1 and we already mined one block after the
  1348  		// commitment was published, so take that into account.
  1349  		mineBlocks(t, net, defaultCSV-1-1, 0)
  1350  		carolSweep, err := waitForTxInMempool(
  1351  			net.Miner.Node, minerMempoolTimeout,
  1352  		)
  1353  		require.NoError(t.t, err, "unable to find Carol's sweep tx in "+
  1354  			"mempool")
  1355  		block := mineBlocks(t, net, 1, 1)[0]
  1356  		assertTxInBlock(t, block, carolSweep)
  1357  
  1358  		// Now the channel should be fully closed also from Carol's POV.
  1359  		assertNumPendingChannels(t, carol, 0, 0, 0, 0)
  1360  	}
  1361  
  1362  	// We query Dave's balance to make sure it increased after the channel
  1363  	// closed. This checks that he was able to sweep the funds he had in
  1364  	// the channel.
  1365  	ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
  1366  	balReq := &lnrpc.WalletBalanceRequest{}
  1367  	daveBalResp, err := dave.WalletBalance(ctxt, balReq)
  1368  	require.NoError(t.t, err, "unable to get dave's balance")
  1369  
  1370  	daveBalance := daveBalResp.ConfirmedBalance
  1371  	require.Greater(
  1372  		t.t, daveBalance, daveStartingBalance, "balance not increased",
  1373  	)
  1374  
  1375  	// Make sure Carol got her balance back.
  1376  	err = wait.NoError(func() error {
  1377  		ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
  1378  		carolBalResp, err := carol.WalletBalance(ctxt, balReq)
  1379  		if err != nil {
  1380  			return fmt.Errorf("unable to get carol's balance: %v", err)
  1381  		}
  1382  
  1383  		carolBalance := carolBalResp.ConfirmedBalance
  1384  
  1385  		// With the SPV backend we don't get an error when trying to
  1386  		// publish an orphan TX (which is what the sweep for the remote
  1387  		// anchor is since the remote commitment TX was not broadcast).
  1388  		// That's why the wallet still sees that as unconfirmed and we
  1389  		// need to count the total balance instead of the confirmed.
  1390  		if net.BackendCfg.Name() == "spv" {
  1391  			carolBalance = carolBalResp.TotalBalance
  1392  		}
  1393  
  1394  		if carolBalance <= carolStartingBalance {
  1395  			return fmt.Errorf("expected carol to have balance "+
  1396  				"above %d, instead had %v", carolStartingBalance,
  1397  				carolBalance)
  1398  		}
  1399  
  1400  		return nil
  1401  	}, defaultTimeout)
  1402  	require.NoError(t.t, err)
  1403  
  1404  	assertNodeNumChannels(t, dave, 0)
  1405  	assertNodeNumChannels(t, carol, 0)
  1406  }
  1407  
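// assertTimeLockSwept asserts that, after a force close involving Carol and
// Dave, Carol sweeps her non-timelocked funds immediately while Dave only
// reclaims his output once its CSV delay has matured, and that both nodes end
// up with no pending channels and an increased wallet balance.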
  1408  func assertTimeLockSwept(net *lntest.NetworkHarness, t *harnessTest,
  1409  	carol *lntest.HarnessNode, carolStartingBalance int64,
  1410  	dave *lntest.HarnessNode, daveStartingBalance int64,
  1411  	anchors bool) {
  1412  
  1413  	ctxb := context.Background()
  1414  	expectedTxes := 2
  1415  	if anchors {
  1416  		// On Decred, the rate is low enough that the anchor output
  1417  		// can be swept on the same transaction in this scenario.
  1418  		expectedTxes = 2
  1419  	}
  1420  
  1421  	// Carol should sweep her funds immediately, as they are not timelocked.
  1422  	// We also expect Carol and Dave to sweep their anchor, if present.
  1423  	_, err := waitForNTxsInMempool(
  1424  		net.Miner.Node, expectedTxes, minerMempoolTimeout,
  1425  	)
  1426  	require.NoError(t.t, err, "unable to find Carol's sweep tx in mempool")
  1427  
  1428  	// Carol should consider the channel pending force close (since she is
  1429  	// waiting for her sweep to confirm).
  1430  	assertNumPendingChannels(t, carol, 0, 1, 0, 0)
  1431  
  1432  	// Dave is considering it "pending force close", as we must wait
  1433  	// before he can sweep his outputs.
  1434  	assertNumPendingChannels(t, dave, 0, 1, 0, 0)
  1435  
  1436  	// Mine the sweep (and anchor) tx(ns).
  1437  	_ = mineBlocks(t, net, 1, expectedTxes)[0]
  1438  
  1439  	// Now Carol should consider the channel fully closed.
  1440  	assertNumPendingChannels(t, carol, 0, 0, 0, 0)
  1441  
  1442  	// We query Carol's balance to make sure it increased after the channel
  1443  	// closed. This checks that she was able to sweep the funds she had in
  1444  	// the channel.
  1445  	ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
  1446  	balReq := &lnrpc.WalletBalanceRequest{}
  1447  	carolBalResp, err := carol.WalletBalance(ctxt, balReq)
  1448  	require.NoError(t.t, err, "unable to get Carol's balance")
  1449  
  1450  	carolBalance := carolBalResp.ConfirmedBalance
  1451  	require.Greater(
  1452  		t.t, carolBalance, carolStartingBalance, "balance not increased",
  1453  	)
  1454  
  1455  	// After Dave's output matures, he should reclaim his funds.
  1456  	//
  1457  	// The commit sweep resolver publishes the sweep tx at defaultCSV-1 and
  1458  	// we already mined one block after the commitment was published, so
  1459  	// take that into account.
  1460  	mineBlocks(t, net, defaultCSV-1-1, 0)
  1461  	daveSweep, err := waitForTxInMempool(
  1462  		net.Miner.Node, minerMempoolTimeout,
  1463  	)
  1464  	require.NoError(t.t, err, "unable to find Dave's sweep tx in mempool")
  1465  	block := mineBlocks(t, net, 1, 1)[0]
  1466  	assertTxInBlock(t, block, daveSweep)
  1467  
  1468  	// Now the channel should be fully closed also from Dave's POV.
  1469  	assertNumPendingChannels(t, dave, 0, 0, 0, 0)
  1470  
  1471  	// Make sure Dave got his balance back.
  1472  	err = wait.NoError(func() error {
  1473  		ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
  1474  		daveBalResp, err := dave.WalletBalance(ctxt, balReq)
  1475  		if err != nil {
  1476  			return fmt.Errorf("unable to get Dave's balance: %v",
  1477  				err)
  1478  		}
  1479  
  1480  		daveBalance := daveBalResp.ConfirmedBalance
  1481  		if daveBalance <= daveStartingBalance {
  1482  			return fmt.Errorf("expected dave to have balance "+
  1483  				"above %d, instead had %v", daveStartingBalance,
  1484  				daveBalance)
  1485  		}
  1486  
  1487  		return nil
  1488  	}, defaultTimeout)
  1489  	require.NoError(t.t, err)
  1490  
  1491  	assertNodeNumChannels(t, dave, 0)
  1492  	assertNodeNumChannels(t, carol, 0)
  1493  }
  1494  
  1495  // verifyCloseUpdate is used to verify that a closed channel update is of the
  1496  // expected type.
  1497  func verifyCloseUpdate(chanUpdate *lnrpc.ChannelEventUpdate,
  1498  	closeType lnrpc.ChannelCloseSummary_ClosureType,
  1499  	closeInitiator lnrpc.Initiator) error {
  1500  
  1501  	// We should receive one inactive and one closed notification
  1502  	// for each channel.
  1503  	switch update := chanUpdate.Channel.(type) {
  1504  	case *lnrpc.ChannelEventUpdate_InactiveChannel:
  1505  		if chanUpdate.Type != lnrpc.ChannelEventUpdate_INACTIVE_CHANNEL {
  1506  			return fmt.Errorf("update type mismatch: expected %v, got %v",
  1507  				lnrpc.ChannelEventUpdate_INACTIVE_CHANNEL,
  1508  				chanUpdate.Type)
  1509  		}
  1510  
  1511  	case *lnrpc.ChannelEventUpdate_ClosedChannel:
  1512  		if chanUpdate.Type !=
  1513  			lnrpc.ChannelEventUpdate_CLOSED_CHANNEL {
  1514  			return fmt.Errorf("update type mismatch: expected %v, got %v",
  1515  				lnrpc.ChannelEventUpdate_CLOSED_CHANNEL,
  1516  				chanUpdate.Type)
  1517  		}
  1518  
  1519  		if update.ClosedChannel.CloseType != closeType {
  1520  			return fmt.Errorf("channel closure type "+
  1521  				"mismatch: expected %v, got %v",
  1522  				closeType,
  1523  				update.ClosedChannel.CloseType)
  1524  		}
  1525  
  1526  		if update.ClosedChannel.CloseInitiator != closeInitiator {
  1527  			return fmt.Errorf("expected close initiator: %v, got: %v",
  1528  				closeInitiator,
  1529  				update.ClosedChannel.CloseInitiator)
  1530  		}
  1531  
  1532  	case *lnrpc.ChannelEventUpdate_FullyResolvedChannel:
  1533  		if chanUpdate.Type != lnrpc.ChannelEventUpdate_FULLY_RESOLVED_CHANNEL {
  1534  			return fmt.Errorf("update type mismatch: expected %v, got %v",
  1535  				lnrpc.ChannelEventUpdate_FULLY_RESOLVED_CHANNEL,
  1536  				chanUpdate.Type)
  1537  		}
  1538  
  1539  	default:
  1540  		return fmt.Errorf("channel update of wrong type, "+
  1541  			"expected closed channel, got %T",
  1542  			update)
  1543  	}
  1544  
  1545  	return nil
  1546  }
  1547  
  1548  // assertNodeNumChannels polls the provided node's ListChannels RPC until it
  1549  // reports the desired total number of channels.
  1550  func assertNodeNumChannels(t *harnessTest, node *lntest.HarnessNode,
  1551  	numChannels int) {
  1552  	ctxb := context.Background()
  1553  
  1554  	// Poll node for its list of channels.
  1555  	req := &lnrpc.ListChannelsRequest{}
  1556  
  1557  	var predErr error
  1558  	pred := func() bool {
  1559  		ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
  1560  		chanInfo, err := node.ListChannels(ctxt, req)
  1561  		if err != nil {
  1562  			predErr = fmt.Errorf("unable to query for node's "+
  1563  				"channels: %v", err)
  1564  			return false
  1565  		}
  1566  
  1567  		// Return true if the query returned the expected number of
  1568  		// channels.
  1569  		num := len(chanInfo.Channels)
  1570  		if num != numChannels {
  1571  			predErr = fmt.Errorf("expected %v channels, got %v",
  1572  				numChannels, num)
  1573  			return false
  1574  		}
  1575  		return true
  1576  	}
  1577  
  1578  	require.NoErrorf(
  1579  		t.t, wait.Predicate(pred, defaultTimeout),
  1580  		"node has incorrect number of channels: %v", predErr,
  1581  	)
  1582  }
  1583  
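        // assertSyncType asserts that the given node reports the expected gossip
        // sync type for the peer identified by the given pubkey, failing the test
        // if the peer is not found.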
  1584  func assertSyncType(t *harnessTest, node *lntest.HarnessNode,
  1585  	peer string, syncType lnrpc.Peer_SyncType) {
  1586  
  1587  	t.t.Helper()
  1588  
  1589  	ctxb := context.Background()
  1590  	ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
  1591  	resp, err := node.ListPeers(ctxt, &lnrpc.ListPeersRequest{})
  1592  	require.NoError(t.t, err)
  1593  
  1594  	for _, rpcPeer := range resp.Peers {
  1595  		if rpcPeer.PubKey != peer {
  1596  			continue
  1597  		}
  1598  
  1599  		require.Equal(t.t, syncType, rpcPeer.SyncType)
  1600  		return
  1601  	}
  1602  
  1603  	t.t.Fatalf("unable to find peer: %s", peer)
  1604  }
  1605  
  1606  // assertActiveHtlcs makes sure all the passed nodes have the _exact_ HTLCs
  1607  // matching payHashes on _all_ their channels.
  1608  func assertActiveHtlcs(nodes []*lntest.HarnessNode, payHashes ...[]byte) error {
  1609  	ctxb := context.Background()
  1610  
  1611  	req := &lnrpc.ListChannelsRequest{}
  1612  	for _, node := range nodes {
  1613  		ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
  1614  		nodeChans, err := node.ListChannels(ctxt, req)
  1615  		if err != nil {
  1616  			return fmt.Errorf("unable to get node chans: %v", err)
  1617  		}
  1618  
  1619  		for _, channel := range nodeChans.Channels {
  1620  			// Record all payment hashes active for this channel.
  1621  			htlcHashes := make(map[string]struct{})
  1622  			for _, htlc := range channel.PendingHtlcs {
  1623  				h := hex.EncodeToString(htlc.HashLock)
  1624  				_, ok := htlcHashes[h]
  1625  				if ok {
  1626  					return fmt.Errorf("duplicate HashLock")
  1627  				}
  1628  				htlcHashes[h] = struct{}{}
  1629  			}
  1630  
  1631  			// Channel should have exactly the payHashes active.
  1632  			if len(payHashes) != len(htlcHashes) {
  1633  				return fmt.Errorf("node [%s:%x] had %v "+
  1634  					"htlcs active, expected %v",
  1635  					node.Cfg.Name, node.PubKey[:],
  1636  					len(htlcHashes), len(payHashes))
  1637  			}
  1638  
  1639  			// Make sure all the payHashes are active.
  1640  			for _, payHash := range payHashes {
  1641  				h := hex.EncodeToString(payHash)
  1642  				if _, ok := htlcHashes[h]; ok {
  1643  					continue
  1644  				}
  1645  				return fmt.Errorf("node [%s:%x] didn't have "+
  1646  					"the payHash %v active", node.Cfg.Name,
  1647  					node.PubKey[:], h)
  1648  			}
  1649  		}
  1650  	}
  1651  
  1652  	return nil
  1653  }
  1654  
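        // assertNumActiveHtlcsChanPoint returns an error if the channel identified
        // by chanPoint on the given node does not have exactly numHtlcs pending
        // HTLCs, or if the channel cannot be found.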
  1655  func assertNumActiveHtlcsChanPoint(node *lntest.HarnessNode,
  1656  	chanPoint wire.OutPoint, numHtlcs int) error {
  1657  
  1658  	ctxb := context.Background()
  1659  
  1660  	req := &lnrpc.ListChannelsRequest{}
  1661  	ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
  1662  	nodeChans, err := node.ListChannels(ctxt, req)
  1663  	if err != nil {
  1664  		return err
  1665  	}
  1666  
  1667  	for _, channel := range nodeChans.Channels {
  1668  		if channel.ChannelPoint != chanPoint.String() {
  1669  			continue
  1670  		}
  1671  
  1672  		if len(channel.PendingHtlcs) != numHtlcs {
  1673  			return fmt.Errorf("expected %v active HTLCs, got %v",
  1674  				numHtlcs, len(channel.PendingHtlcs))
  1675  		}
  1676  		return nil
  1677  	}
  1678  
  1679  	return fmt.Errorf("channel point %v not found", chanPoint)
  1680  }
  1681  
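        // assertNumActiveHtlcs returns an error if any channel on any of the passed
        // nodes does not have exactly numHtlcs pending HTLCs.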
  1682  func assertNumActiveHtlcs(nodes []*lntest.HarnessNode, numHtlcs int) error {
  1683  	ctxb := context.Background()
  1684  
  1685  	req := &lnrpc.ListChannelsRequest{}
  1686  	for _, node := range nodes {
  1687  		ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
  1688  		nodeChans, err := node.ListChannels(ctxt, req)
  1689  		if err != nil {
  1690  			return err
  1691  		}
  1692  
  1693  		for _, channel := range nodeChans.Channels {
  1694  			if len(channel.PendingHtlcs) != numHtlcs {
  1695  				return fmt.Errorf("expected %v HTLCs, got %v",
  1696  					numHtlcs, len(channel.PendingHtlcs))
  1697  			}
  1698  		}
  1699  	}
  1700  
  1701  	return nil
  1702  }
  1703  
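        // assertSpendingTxInMempool waits for a transaction spending all of the
        // given outpoints to appear in the mempool and returns its hash.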
  1704  func assertSpendingTxInMempool(t *harnessTest, miner *rpcclient.Client,
  1705  	timeout time.Duration, inputs ...wire.OutPoint) chainhash.Hash {
  1706  
  1707  	tx := getSpendingTxInMempool(t, miner, timeout, inputs...)
  1708  	return tx.TxHash()
  1709  }
  1710  
  1711  // getSpendingTxInMempool waits for a transaction spending all of the given
  1712  // outpoints to appear in the mempool and returns that tx in full.
  1713  func getSpendingTxInMempool(t *harnessTest, miner *rpcclient.Client,
  1714  	timeout time.Duration, inputs ...wire.OutPoint) *wire.MsgTx {
  1715  
  1716  	inputSet := make(map[wire.OutPoint]struct{}, len(inputs))
  1717  	breakTimeout := time.After(timeout)
  1718  	ticker := time.NewTicker(50 * time.Millisecond)
  1719  	defer ticker.Stop()
  1720  
  1721  	for {
  1722  		select {
  1723  		case <-breakTimeout:
  1724  			t.Fatalf("didn't find tx in mempool")
  1725  		case <-ticker.C:
  1726  			mempool, err := miner.GetRawMempool(context.Background(), jsonrpctypes.GRMRegular)
  1727  			require.NoError(t.t, err, "unable to get mempool")
  1728  
  1729  			if len(mempool) == 0 {
  1730  				continue
  1731  			}
  1732  
  1733  			for _, txid := range mempool {
  1734  				tx, err := miner.GetRawTransaction(context.Background(), txid)
  1735  				require.NoError(t.t, err, "unable to fetch tx")
  1736  				msgTx := tx.MsgTx()
  1737  
  1738  				// Include the inputs again in case they were
  1739  				// removed in a previous iteration.
  1740  				for _, input := range inputs {
  1741  					inputSet[input] = struct{}{}
  1742  				}
  1743  
  1744  				for _, txIn := range msgTx.TxIn {
  1745  					input := txIn.PreviousOutPoint
  1746  					delete(inputSet, input)
  1747  				}
  1748  
  1749  				if len(inputSet) > 0 {
  1750  					// Missing input, check next transaction
  1751  					// or try again.
  1752  					continue
  1753  				}
  1754  
  1755  				// Transaction spends all expected inputs,
  1756  				// return.
  1757  				return msgTx
  1758  			}
  1759  		}
  1760  	}
  1761  }
  1762  
  1763  // assertTxLabel is a helper function which finds a target tx in our set
  1764  // of transactions and checks that it has the desired label.
  1765  func assertTxLabel(t *harnessTest, node *lntest.HarnessNode,
  1766  	targetTx, label string) {
  1767  
  1768  	// List all transactions relevant to our wallet, and find the tx so that
  1769  	// we can check the correct label has been set.
  1770  	ctxb := context.Background()
  1771  	ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
  1772  	defer cancel()
  1773  
  1774  	txResp, err := node.GetTransactions(
  1775  		ctxt, &lnrpc.GetTransactionsRequest{},
  1776  	)
  1777  	require.NoError(t.t, err, "could not get transactions")
  1778  
  1779  	// Find our transaction in the set of transactions returned and check
  1780  	// its label.
  1781  	for _, txn := range txResp.Transactions {
  1782  		if txn.TxHash == targetTx {
  1783  			// This test is ignored in dcrlnd because dcrwallet
  1784  			// does not support tx labeling.
  1785  			// require.Equal(t.t, label, txn.Label, "labels not match")
  1786  		}
  1787  	}
  1788  }
  1789  
  1790  // sendAndAssertSuccess sends the given payment request and asserts that the
  1791  // payment completes successfully.
  1792  func sendAndAssertSuccess(t *harnessTest, node *lntest.HarnessNode,
  1793  	req *routerrpc.SendPaymentRequest) *lnrpc.Payment {
  1794  
  1795  	ctxb := context.Background()
  1796  	ctx, cancel := context.WithTimeout(ctxb, defaultTimeout)
  1797  	defer cancel()
  1798  
  1799  	var result *lnrpc.Payment
  1800  	err := wait.NoError(func() error {
  1801  		stream, err := node.RouterClient.SendPaymentV2(ctx, req)
  1802  		if err != nil {
  1803  			return fmt.Errorf("unable to send payment: %v", err)
  1804  		}
  1805  
  1806  		result, err = getPaymentResult(stream)
  1807  		if err != nil {
  1808  			return fmt.Errorf("unable to get payment result: %v",
  1809  				err)
  1810  		}
  1811  
  1812  		if result.Status != lnrpc.Payment_SUCCEEDED {
  1813  			return fmt.Errorf("payment failed: %v", result.Status)
  1814  		}
  1815  
  1816  		return nil
  1817  	}, defaultTimeout)
  1818  	require.NoError(t.t, err)
  1819  
  1820  	return result
  1821  }
  1822  
  1823  // sendAndAssertFailure sends the given payment request and asserts that the
  1824  // payment fails with the expected reason.
  1825  func sendAndAssertFailure(t *harnessTest, node *lntest.HarnessNode,
  1826  	req *routerrpc.SendPaymentRequest,
  1827  	failureReason lnrpc.PaymentFailureReason) *lnrpc.Payment {
  1828  
  1829  	ctx, cancel := context.WithTimeout(context.Background(), defaultTimeout)
  1830  	defer cancel()
  1831  
  1832  	stream, err := node.RouterClient.SendPaymentV2(ctx, req)
  1833  	require.NoError(t.t, err, "unable to send payment")
  1834  
  1835  	result, err := getPaymentResult(stream)
  1836  	require.NoError(t.t, err, "unable to get payment result")
  1837  
  1838  	require.Equal(
  1839  		t.t, lnrpc.Payment_FAILED, result.Status,
  1840  		"payment was expected to fail, but succeeded",
  1841  	)
  1842  
  1843  	require.Equal(
  1844  		t.t, failureReason, result.FailureReason,
  1845  		"payment failureReason not matched",
  1846  	)
  1847  
  1848  	return result
  1849  }
  1850  
  1851  // getPaymentResult reads a final result from the stream and returns it.
  1852  func getPaymentResult(stream routerrpc.Router_SendPaymentV2Client) (
  1853  	*lnrpc.Payment, error) {
  1854  
  1855  	for {
  1856  		payment, err := stream.Recv()
  1857  		if err != nil {
  1858  			return nil, err
  1859  		}
  1860  
  1861  		if payment.Status != lnrpc.Payment_IN_FLIGHT {
  1862  			return payment, nil
  1863  		}
  1864  	}
  1865  }
  1866  
  1867  // assertNumUTXOs waits for the given number of UTXOs to become available,
  1868  // failing the test if they do not appear before the default timeout.
  1869  func assertNumUTXOs(t *testing.T, node *lntest.HarnessNode, expectedUtxos int) {
  1870  	ctxb := context.Background()
  1871  	ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
  1872  	defer cancel()
  1873  	err := wait.NoError(func() error {
  1874  		resp, err := node.ListUnspent( // nolint:staticcheck
  1875  			ctxt, &lnrpc.ListUnspentRequest{
  1876  				MinConfs: 1,
  1877  				MaxConfs: math.MaxInt32,
  1878  			},
  1879  		)
  1880  		if err != nil {
  1881  			return fmt.Errorf("error listing unspent: %v", err)
  1882  		}
  1883  
  1884  		if len(resp.Utxos) != expectedUtxos {
  1885  			return fmt.Errorf("unexpected number of UTXOs, got %d "+
  1886  				"wanted %d", len(resp.Utxos), expectedUtxos)
  1887  		}
  1888  
  1889  		return nil
  1890  	}, defaultTimeout)
  1891  	require.NoError(t, err, "wait for listunspent")
  1892  }
  1893  
  1894  // assertChannelPolicyUpdate checks that the required policy update has
  1895  // happened on the given node.
  1896  func assertChannelPolicyUpdate(t *testing.T, node *lntest.HarnessNode,
  1897  	advertisingNode string, policy *lnrpc.RoutingPolicy,
  1898  	chanPoint *lnrpc.ChannelPoint, includeUnannounced bool) {
  1899  
  1900  	require.NoError(
  1901  		t, node.WaitForChannelPolicyUpdate(
  1902  			advertisingNode, policy,
  1903  			chanPoint, includeUnannounced,
  1904  		), "error while waiting for channel update",
  1905  	)
  1906  }
  1907  
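        // transactionInWallet returns true if the given node's wallet reports a
        // transaction with the given txid.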
  1908  func transactionInWallet(node *lntest.HarnessNode, txid chainhash.Hash) bool {
  1909  	txStr := txid.String()
  1910  
  1911  	txResp, err := node.GetTransactions(
  1912  		context.Background(), &lnrpc.GetTransactionsRequest{},
  1913  	)
  1914  	if err != nil {
  1915  		return false
  1916  	}
  1917  
  1918  	for _, txn := range txResp.Transactions {
  1919  		if txn.TxHash == txStr {
  1920  			return true
  1921  		}
  1922  	}
  1923  
  1924  	return false
  1925  }
  1926  
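        // assertTransactionInWallet asserts that the given transaction is found in
        // the node's wallet before the default timeout.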
  1927  func assertTransactionInWallet(t *testing.T, node *lntest.HarnessNode, txID chainhash.Hash) {
  1928  	t.Helper()
  1929  
  1930  	err := wait.Predicate(func() bool {
  1931  		return transactionInWallet(node, txID)
  1932  	}, defaultTimeout)
  1933  	require.NoError(
  1934  		t, err, fmt.Sprintf("transaction %v not found in wallet", txID),
  1935  	)
  1936  }
  1937  
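        // assertTransactionNotInWallet asserts that the given transaction is not
        // present in the node's wallet, waiting up to the default timeout for it
        // to disappear.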
  1938  func assertTransactionNotInWallet(t *testing.T, node *lntest.HarnessNode,
  1939  	txID chainhash.Hash) {
  1940  
  1941  	t.Helper()
  1942  
  1943  	err := wait.Predicate(func() bool {
  1944  		return !transactionInWallet(node, txID)
  1945  	}, defaultTimeout)
  1946  	require.NoError(
  1947  		t, err, fmt.Sprintf("transaction %v found in wallet", txID),
  1948  	)
  1949  }
  1950  
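        // assertAnchorOutputLost asserts that the anchor output of the force closed
        // channel identified by chanPoint is reported as lost in the node's pending
        // channels, waiting up to the default timeout.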
  1951  func assertAnchorOutputLost(t *harnessTest, node *lntest.HarnessNode,
  1952  	chanPoint wire.OutPoint) {
  1953  
  1954  	pendingChansRequest := &lnrpc.PendingChannelsRequest{}
  1955  	err := wait.Predicate(func() bool {
  1956  		resp, pErr := node.PendingChannels(
  1957  			context.Background(), pendingChansRequest,
  1958  		)
  1959  		if pErr != nil {
  1960  			return false
  1961  		}
  1962  
  1963  		for _, pendingChan := range resp.PendingForceClosingChannels {
  1964  			if pendingChan.Channel.ChannelPoint == chanPoint.String() {
  1965  				return (pendingChan.Anchor ==
  1966  					lnrpc.PendingChannelsResponse_ForceClosedChannel_LOST)
  1967  			}
  1968  		}
  1969  
  1970  		return false
  1971  	}, defaultTimeout)
  1972  	require.NoError(t.t, err, "anchor doesn't show as being lost")
  1973  }