github.com/decred/dcrlnd@v0.7.6/lntest/itest/lnd_open_channel_test.go

package itest

import (
	"context"
	"fmt"
	"time"

	"github.com/decred/dcrd/chaincfg/chainhash"
	"github.com/decred/dcrd/dcrutil/v4"
	"github.com/decred/dcrlnd/lnrpc"
	"github.com/decred/dcrlnd/lntest"
	"github.com/decred/dcrlnd/lntest/wait"
	rpctest "github.com/decred/dcrtest/dcrdtest"
	"github.com/stretchr/testify/require"
	"matheusd.com/testctx"
)

// testOpenChannelAfterReorg tests that when the funding tx of an open
// channel gets reorged out, the channel is no longer present in the node's
// routing table.
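// The test forks the chain with a temporary miner: the funding tx confirms
// only on the original miner's (shorter) chain, and once the miners
// reconnect the longer chain wins, returning the funding tx to the mempool
// and removing the channel edge from the graph.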
func testOpenChannelAfterReorg(net *lntest.NetworkHarness, t *harnessTest) {
	ctxb := context.Background()

	// Currently disabled due to
	// https://github.com/decred/dcrwallet/issues/1710. Re-assess after
	// that is fixed.
	if net.BackendCfg.Name() == "spv" {
		t.Skipf("Skipping for SPV for the moment")
	}

	// Set up a new miner that we can use to cause a reorg.
	tempLogDir := ".tempminerlogs"
	logFilename := "output-open_channel_reorg-temp_miner.log"
	tempMiner, err := lntest.NewTempMiner(tempLogDir, logFilename)
	require.NoError(t.t, err, "failed to create temp miner")
	defer func() {
		require.NoError(
			t.t, tempMiner.Stop(),
			"failed to clean up temp miner",
		)
	}()

	// We start by connecting the new miner to our original miner, such
	// that it will sync to our original chain.
	if err := rpctest.ConnectNode(ctxb, net.Miner.Harness, tempMiner.Harness); err != nil {
		t.Fatalf("unable to connect harnesses: %v", err)
	}
	nodeSlice := []*rpctest.Harness{net.Miner.Harness, tempMiner.Harness}
	if err := rpctest.JoinNodes(testctx.New(t), nodeSlice, rpctest.Blocks); err != nil {
		t.Fatalf("unable to join node on blocks: %v", err)
	}

	// The two miners should be at the same block height.
	assertMinerBlockHeightDelta(t, net.Miner, tempMiner, 0)

	// We disconnect the two nodes, such that we can start mining on them
	// individually without the other one learning about the new blocks.
	err = rpctest.RemoveNode(ctxb, net.Miner.Harness, tempMiner.Harness)
	if err != nil {
		t.Fatalf("unable to remove node: %v", err)
	}

	// Create a new channel that requires one confirmation before it's
	// considered open, then broadcast the funding transaction.
	chanAmt := defaultChanAmt
	pushAmt := dcrutil.Amount(0)
	pendingUpdate, err := net.OpenPendingChannel(
		net.Alice, net.Bob, chanAmt, pushAmt,
	)
	if err != nil {
		t.Fatalf("unable to open channel: %v", err)
	}

	// Wait for the miner to have seen the funding tx. The temporary miner
	// is disconnected and won't see the transaction.
	_, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
	if err != nil {
		t.Fatalf("failed to find funding tx in mempool: %v", err)
	}

	// At this point, the channel's funding transaction will have been
	// broadcast, but not confirmed, and the channel should be pending.
	assertNumOpenChannelsPending(t, net.Alice, net.Bob, 1)

	fundingTxID, err := chainhash.NewHash(pendingUpdate.Txid)
	if err != nil {
		t.Fatalf("unable to convert funding txid into chainhash.Hash:"+
			" %v", err)
	}

	// We now cause a fork by letting our original miner mine 10 blocks
	// and our new miner mine 15. This will also confirm our pending
	// channel on the original miner's chain, at which point it should be
	// considered open.
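	// Once both have mined, the original miner sits 10 blocks past the
	// fork point and the temp miner 15, so the temp miner's chain is 5
	// blocks longer and will win once the miners reconnect.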
	block := mineBlocks(t, net, 10, 1)[0]
	assertTxInBlock(t, block, fundingTxID)
	if _, err := rpctest.AdjustedSimnetMiner(ctxb, tempMiner.Node, 15); err != nil {
		t.Fatalf("unable to generate blocks in temp miner: %v", err)
	}

	// Ensure the chain lengths are what we expect, with the temp miner
	// being 5 blocks ahead.
	assertMinerBlockHeightDelta(t, net.Miner, tempMiner, 5)

	// Wait for Alice to sync to the original miner's chain.
	_, minerHeight, err := net.Miner.Node.GetBestBlock(ctxb)
	if err != nil {
		t.Fatalf("unable to get current blockheight: %v", err)
	}
	err = waitForNodeBlockHeight(net.Alice, minerHeight)
	if err != nil {
		t.Fatalf("unable to sync to chain: %v", err)
	}

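	// The channel point (funding txid + output index) uniquely identifies
	// the channel; it is used below to wait for the channel announcement
	// and, at the end of the test, to close the channel.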
	chanPoint := &lnrpc.ChannelPoint{
		FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
			FundingTxidBytes: pendingUpdate.Txid,
		},
		OutputIndex: pendingUpdate.OutputIndex,
	}

	// Ensure channel is no longer pending.
	assertNumOpenChannelsPending(t, net.Alice, net.Bob, 0)

	// Wait for Alice and Bob to recognize and advertise the new channel
	// generated above.
	err = net.Alice.WaitForNetworkChannelOpen(chanPoint)
	if err != nil {
		t.Fatalf("alice didn't advertise channel before "+
			"timeout: %v", err)
	}
	err = net.Bob.WaitForNetworkChannelOpen(chanPoint)
	if err != nil {
		t.Fatalf("bob didn't advertise channel before "+
			"timeout: %v", err)
	}

	// Alice should now have 1 edge in her graph.
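	// We query with IncludeUnannounced so the channel is counted even if
	// it has not yet been announced to the wider network.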
	req := &lnrpc.ChannelGraphRequest{
		IncludeUnannounced: true,
	}
	ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
	chanGraph, err := net.Alice.DescribeGraph(ctxt, req)
	cancel()
	if err != nil {
		t.Fatalf("unable to query for alice's routing table: %v", err)
	}

	numEdges := len(chanGraph.Edges)
	if numEdges != 1 {
		t.Fatalf("expected to find one edge in the graph, found %d",
			numEdges)
	}

	// Let enough time pass for all wallet sync to be complete.
	time.Sleep(3 * time.Second)

	// Now we disconnect Alice's chain backend from the original miner, and
	// connect the two miners together. Since the temporary miner knows
	// about a longer chain, both miners should sync to that chain.
	err = net.BackendCfg.DisconnectMiner()
	if err != nil {
		t.Fatalf("unable to disconnect miner: %v", err)
	}

	// Connecting to the temporary miner should now cause our original
	// chain to be re-orged out.
	err = rpctest.ConnectNode(testctx.New(t), net.Miner.Harness, tempMiner.Harness)
	if err != nil {
		t.Fatalf("unable to connect nodes: %v", err)
	}

	nodes := []*rpctest.Harness{tempMiner.Harness, net.Miner.Harness}
	if err := rpctest.JoinNodes(testctx.New(t), nodes, rpctest.Blocks); err != nil {
		t.Fatalf("unable to join node on blocks: %v", err)
	}

	// Once again they should be on the same chain.
	assertMinerBlockHeightDelta(t, net.Miner, tempMiner, 0)

	// Now we disconnect the two miners, and connect our original miner to
	// our chain backend once again.
	err = rpctest.RemoveNode(ctxb, net.Miner.Harness, tempMiner.Harness)
	if err != nil {
		t.Fatalf("unable to remove node: %v", err)
	}

	err = net.BackendCfg.ConnectMiner()
	if err != nil {
		t.Fatalf("unable to connect miner: %v", err)
	}

	// This should have caused a reorg, and Alice should sync to the longer
	// chain, where the funding transaction is not confirmed.
	_, tempMinerHeight, err := tempMiner.Node.GetBestBlock(ctxb)
	if err != nil {
		t.Fatalf("unable to get current blockheight: %v", err)
	}
	err = waitForNodeBlockHeight(net.Alice, tempMinerHeight)
	if err != nil {
		t.Fatalf("unable to sync to chain: %v", err)
	}

	// Since the funding tx was reorged out, Alice should now have no edges
	// in her graph.
	req = &lnrpc.ChannelGraphRequest{
		IncludeUnannounced: true,
	}

	var predErr error
	err = wait.Predicate(func() bool {
		ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
		defer cancel()
		chanGraph, err = net.Alice.DescribeGraph(ctxt, req)
		if err != nil {
			predErr = fmt.Errorf("unable to query for alice's "+
				"routing table: %v", err)
			return false
		}

		numEdges = len(chanGraph.Edges)
		if numEdges != 0 {
			predErr = fmt.Errorf("expected to find no edge in "+
				"the graph, found %d", numEdges)
			return false
		}
		return true
	}, defaultTimeout)
	if err != nil {
		t.Fatalf("%v", predErr)
	}

	// Wait again for any outstanding ops in the subsystems to catch up.
	time.Sleep(3 * time.Second)

	// Clean up by mining the funding tx again, then closing the channel.
	block = mineBlocks(t, net, 1, 1)[0]
	assertTxInBlock(t, block, fundingTxID)

	closeReorgedChannelAndAssert(t, net, net.Alice, chanPoint, false)
}

// testBasicChannelCreationAndUpdates tests multiple channel openings and
// closings, and ensures that a node subscribed to channel updates receives
// them correctly for both cooperatively and force closed channels.
func testBasicChannelCreationAndUpdates(net *lntest.NetworkHarness, t *harnessTest) {
	runBasicChannelCreationAndUpdates(net, t, net.Alice, net.Bob)
}

// runBasicChannelCreationAndUpdates tests multiple channel openings and
// closings, and ensures that a node subscribed to channel updates receives
// them correctly for both cooperatively and force closed channels.
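// For each opened channel a subscriber sees, in order, a pending open, an
// open, and an active notification; each close likewise produces a fixed
// sequence of close notifications.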
func runBasicChannelCreationAndUpdates(net *lntest.NetworkHarness,
	t *harnessTest, alice, bob *lntest.HarnessNode) {

	ctxb := context.Background()
	const (
		numChannels = 2
		amount      = defaultChanAmt
	)

	// Subscribe Bob and Alice to channel event notifications.
	bobChanSub := subscribeChannelNotifications(ctxb, t, bob)
	defer close(bobChanSub.quit)

	aliceChanSub := subscribeChannelNotifications(ctxb, t, alice)
	defer close(aliceChanSub.quit)
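	// Closing quit on exit tears down the subscriptions created above.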

	// Open the channels between Alice and Bob, asserting that the channels
	// have been properly opened on-chain.
	chanPoints := make([]*lnrpc.ChannelPoint, numChannels)
	for i := 0; i < numChannels; i++ {
		chanPoints[i] = openChannelAndAssert(
			t, net, alice, bob, lntest.OpenChannelParams{
				Amt: amount,
			},
		)
	}

	// Since each of the channels just became open, Bob and Alice should
	// each receive a pending open, an open, and an active notification
	// for each channel.
	const numExpectedOpenUpdates = 3 * numChannels
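	// For the k-th channel the updates arrive at positions 3k (pending
	// open), 3k+1 (open) and 3k+2 (active), which is why numChannelUpds%3
	// selects the expected update type below.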
	verifyOpenUpdatesReceived := func(sub channelSubscription) error {
		numChannelUpds := 0
		for numChannelUpds < numExpectedOpenUpdates {
			select {
			case update := <-sub.updateChan:
				switch update.Type {
				case lnrpc.ChannelEventUpdate_PENDING_OPEN_CHANNEL:
					if numChannelUpds%3 != 0 {
						return fmt.Errorf("expected " +
							"open or active " +
							"channel ntfn, got " +
							"pending open channel " +
							"ntfn instead")
					}
				case lnrpc.ChannelEventUpdate_OPEN_CHANNEL:
					if numChannelUpds%3 != 1 {
						return fmt.Errorf("expected " +
							"pending open or " +
							"active channel ntfn, " +
							"got open channel " +
							"ntfn instead")
					}
				case lnrpc.ChannelEventUpdate_ACTIVE_CHANNEL:
					if numChannelUpds%3 != 2 {
						return fmt.Errorf("expected " +
							"pending open or open " +
							"channel ntfn, got " +
							"active channel ntfn " +
							"instead")
					}
				default:
					return fmt.Errorf("update type mismatch: "+
						"expected open or active channel "+
						"notification, got: %v",
						update.Type)
				}
				numChannelUpds++

			case <-time.After(time.Second * 10):
				return fmt.Errorf("timeout waiting for channel "+
					"notifications, only received %d/%d "+
					"chanupds", numChannelUpds,
					numExpectedOpenUpdates)
			}
		}

		return nil
	}

	require.NoError(
		t.t, verifyOpenUpdatesReceived(bobChanSub), "bob open channels",
	)
	require.NoError(
		t.t, verifyOpenUpdatesReceived(aliceChanSub), "alice open "+
			"channels",
	)

	// Close the channels between Alice and Bob, asserting that the channels
	// have been properly closed on-chain.
	for i, chanPoint := range chanPoints {
		// Force close every other channel, starting with the first.
		force := i%2 == 0
		closeChannelAndAssert(t, net, alice, chanPoint, force)
		if force {
			cleanupForceClose(t, net, alice, chanPoint)
		}
	}

	// verifyCloseUpdatesReceived is used to verify that Alice and Bob
	// receive the correct channel updates in order.
	const numExpectedCloseUpdates = 3 * numChannels
	verifyCloseUpdatesReceived := func(sub channelSubscription,
		forceType lnrpc.ChannelCloseSummary_ClosureType,
		closeInitiator lnrpc.Initiator) error {

		// Ensure the full sequence of close notifications (three per
		// channel, per numExpectedCloseUpdates) is received for each
		// closed channel.
		numChannelUpds := 0
		for numChannelUpds < numExpectedCloseUpdates {
			expectedCloseType := lnrpc.ChannelCloseSummary_COOPERATIVE_CLOSE

			// Every other channel should be force closed. If this
			// channel was force closed, set the expected close type
			// to the type passed in.
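			// numChannelUpds/3 is the index of the channel whose
			// updates are currently being verified; channels at
			// even indexes were force closed above.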
			force := (numChannelUpds/3)%2 == 0
			if force {
				expectedCloseType = forceType
			}

			select {
			case chanUpdate := <-sub.updateChan:
				err := verifyCloseUpdate(
					chanUpdate, expectedCloseType,
					closeInitiator,
				)
				if err != nil {
					return err
				}

				numChannelUpds++

			case err := <-sub.errChan:
				return err

			case <-time.After(time.Second * 10):
				return fmt.Errorf("timeout waiting "+
					"for channel notifications, only "+
					"received %d/%d chanupds",
					numChannelUpds,
					numExpectedCloseUpdates)
			}
		}

		return nil
	}

	// Verify Bob receives all closed channel notifications. He should
	// receive a remote force close notification for force closed channels.
	// All channels (cooperatively and force closed) should have a remote
	// close initiator because Alice closed the channels.
	require.NoError(
		t.t, verifyCloseUpdatesReceived(
			bobChanSub,
			lnrpc.ChannelCloseSummary_REMOTE_FORCE_CLOSE,
			lnrpc.Initiator_INITIATOR_REMOTE,
		), "verifying bob close updates",
	)

	// Verify Alice receives all closed channel notifications. She should
	// receive a local force close notification for force closed channels.
	// All channels (cooperatively and force closed) should have a local
	// close initiator because Alice closed the channels.
	require.NoError(
		t.t, verifyCloseUpdatesReceived(
			aliceChanSub,
			lnrpc.ChannelCloseSummary_LOCAL_FORCE_CLOSE,
			lnrpc.Initiator_INITIATOR_LOCAL,
		), "verifying alice close updates",
	)
}