github.com/decred/dcrlnd@v0.7.6/lntest/itest/lnd_channel_backup_test.go

     1  package itest
     2  
     3  import (
     4  	"context"
     5  	"fmt"
     6  	"io/ioutil"
     7  	"os"
     8  	"path/filepath"
     9  	"strconv"
    10  	"strings"
    11  	"sync"
    12  	"testing"
    13  	"time"
    14  
    15  	"github.com/decred/dcrd/dcrutil/v4"
    16  	"github.com/decred/dcrd/wire"
    17  	"github.com/decred/dcrlnd/chanbackup"
    18  	"github.com/decred/dcrlnd/lnrpc"
    19  	"github.com/decred/dcrlnd/lnrpc/walletrpc"
    20  	"github.com/decred/dcrlnd/lntest"
    21  	"github.com/decred/dcrlnd/lntest/wait"
    22  	"github.com/decred/dcrlnd/sweep"
    23  	"github.com/stretchr/testify/require"
    24  	"matheusd.com/testctx"
    25  )
    26  
    27  // testChannelBackupRestore tests that we're able to recover from, and initiate
    28  // the DLP protocol via: the RPC restore command, restoring on unlock, and
    29  // restoring from initial wallet creation. We'll also alternate between
    30  // restoring from the on-disk file and restoring from the backup exported
    31  // over RPC.
    32  func testChannelBackupRestore(net *lntest.NetworkHarness, t *harnessTest) {
    33  	password := []byte("El Psy Kongroo")
    34  
    35  	ctxb := context.Background()
    36  
    37  	var testCases = []chanRestoreTestCase{
    38  		// Restore from backups obtained via the RPC interface. Dave
    39  		// was the initiator of the channel.
    40  		{
    41  			name:            "restore from RPC backup",
    42  			channelsUpdated: false,
    43  			initiator:       true,
    44  			private:         false,
    45  			restoreMethod: func(oldNode *lntest.HarnessNode,
    46  				backupFilePath string,
    47  				mnemonic []string) (nodeRestorer, error) {
    48  
    49  				// For this restoration method, we'll grab the
    50  				// current multi-channel backup from the old
    51  				// node, and use it to restore a new node
    52  				// within the closure.
    53  				req := &lnrpc.ChanBackupExportRequest{}
    54  				chanBackup, err := oldNode.ExportAllChannelBackups(
    55  					ctxb, req,
    56  				)
    57  				if err != nil {
    58  					return nil, fmt.Errorf("unable to obtain "+
    59  						"channel backup: %v", err)
    60  				}
    61  
    62  				multi := chanBackup.MultiChanBackup.MultiChanBackup
    63  
    64  				// In our nodeRestorer function, we'll restore
    65  				// the node from seed, then manually recover
    66  				// the channel backup.
    67  				return chanRestoreViaRPC(
    68  					net, password, mnemonic, multi, oldNode,
    69  				)
    70  			},
    71  		},
    72  
    73  		// Restore the backup from the on-disk file, using the RPC
    74  		// interface.
    75  		{
    76  			name:      "restore from backup file",
    77  			initiator: true,
    78  			private:   false,
    79  			restoreMethod: func(oldNode *lntest.HarnessNode,
    80  				backupFilePath string,
    81  				mnemonic []string) (nodeRestorer, error) {
    82  
    83  				// Read the entire Multi backup stored within
    84  				// this node's channel.backup file.
    85  				multi, err := ioutil.ReadFile(backupFilePath)
    86  				if err != nil {
    87  					return nil, err
    88  				}
    89  
    90  				// Now that we have Dave's backup file, we'll
    91  				// create a new nodeRestorer that will restore
    92  				// using the on-disk channel.backup.
    93  				return chanRestoreViaRPC(
    94  					net, password, mnemonic, multi, oldNode,
    95  				)
    96  			},
    97  		},
    98  
    99  		// Restore the backup as part of node initialization with the
   100  		// prior mnemonic and new backup seed.
   101  		{
   102  			name:      "restore during creation",
   103  			initiator: true,
   104  			private:   false,
   105  			restoreMethod: func(oldNode *lntest.HarnessNode,
   106  				backupFilePath string,
   107  				mnemonic []string) (nodeRestorer, error) {
   108  
   109  				// First, fetch the current backup state as is,
   110  				// to obtain our latest Multi.
   111  				chanBackup, err := oldNode.ExportAllChannelBackups(
   112  					ctxb, &lnrpc.ChanBackupExportRequest{},
   113  				)
   114  				if err != nil {
   115  					return nil, fmt.Errorf("unable to obtain "+
   116  						"channel backup: %v", err)
   117  				}
   118  				backupSnapshot := &lnrpc.ChanBackupSnapshot{
   119  					MultiChanBackup: chanBackup.MultiChanBackup,
   120  				}
   121  
   122  				// Create a new nodeRestorer that will restore
   123  				// the node using the Multi backup we just
   124  				// obtained above.
   125  				return func() (*lntest.HarnessNode, error) {
   126  					return net.RestoreNodeWithSeed(
   127  						"dave", nil, password, mnemonic,
   128  						"", 1000, backupSnapshot,
   129  						copyPorts(oldNode),
   130  					)
   131  				}, nil
   132  			},
   133  		},
   134  
   135  		// Restore the backup once the node has already been
   136  		// re-created, using the Unlock call.
   137  		{
   138  			name:      "restore during unlock",
   139  			initiator: true,
   140  			private:   false,
   141  			restoreMethod: func(oldNode *lntest.HarnessNode,
   142  				backupFilePath string,
   143  				mnemonic []string) (nodeRestorer, error) {
   144  
   145  				// First, fetch the current backup state as is,
   146  				// to obtain our latest Multi.
   147  				chanBackup, err := oldNode.ExportAllChannelBackups(
   148  					ctxb, &lnrpc.ChanBackupExportRequest{},
   149  				)
   150  				if err != nil {
   151  					return nil, fmt.Errorf("unable to obtain "+
   152  						"channel backup: %v", err)
   153  				}
   154  				backupSnapshot := &lnrpc.ChanBackupSnapshot{
   155  					MultiChanBackup: chanBackup.MultiChanBackup,
   156  				}
   157  
   158  				// Create a new nodeRestorer that will restore
   159  				// the node with its seed, but no channel
   160  				// backup, shutdown this initialized node, then
   161  				// restart it again using Unlock.
   162  				return func() (*lntest.HarnessNode, error) {
   163  					newNode, err := net.RestoreNodeWithSeed(
   164  						"dave", nil, password, mnemonic,
   165  						"", 1000, nil,
   166  						copyPorts(oldNode),
   167  					)
   168  					if err != nil {
   169  						return nil, err
   170  					}
   171  
   172  					err = net.RestartNode(
   173  						newNode, nil, backupSnapshot,
   174  					)
   175  					if err != nil {
   176  						return nil, err
   177  					}
   178  
   179  					return newNode, nil
   180  				}, nil
   181  			},
   182  		},
   183  
   184  		// Restore the backup from the on-disk file a second time to
   185  		// make sure imports can be canceled and later resumed.
   186  		{
   187  			name:      "restore from backup file twice",
   188  			initiator: true,
   189  			private:   false,
   190  			restoreMethod: func(oldNode *lntest.HarnessNode,
   191  				backupFilePath string,
   192  				mnemonic []string) (nodeRestorer, error) {
   193  
   194  				// Read the entire Multi backup stored within
   195  				// this node's channel.backup file.
   196  				multi, err := ioutil.ReadFile(backupFilePath)
   197  				if err != nil {
   198  					return nil, err
   199  				}
   200  
   201  				// Now that we have Dave's backup file, we'll
   202  				// create a new nodeRestorer that will restore
   203  				// using the on-disk channel.backup.
   204  				backup := &lnrpc.RestoreChanBackupRequest_MultiChanBackup{
   205  					MultiChanBackup: multi,
   206  				}
   207  
   208  				ctxb := context.Background()
   209  
   210  				return func() (*lntest.HarnessNode, error) {
   211  					newNode, err := net.RestoreNodeWithSeed(
   212  						"dave", nil, password, mnemonic,
   213  						"", 1000, nil,
   214  						copyPorts(oldNode),
   215  					)
   216  					if err != nil {
   217  						return nil, fmt.Errorf("unable to "+
   218  							"restore node: %v", err)
   219  					}
   220  
   221  					_, err = newNode.RestoreChannelBackups(
   222  						ctxb,
   223  						&lnrpc.RestoreChanBackupRequest{
   224  							Backup: backup,
   225  						},
   226  					)
   227  					if err != nil {
   228  						return nil, fmt.Errorf("unable "+
   229  							"to restore backups: %v",
   230  							err)
   231  					}
   232  
   233  					_, err = newNode.RestoreChannelBackups(
   234  						ctxb,
   235  						&lnrpc.RestoreChanBackupRequest{
   236  							Backup: backup,
   237  						},
   238  					)
   239  					if err != nil {
   240  						return nil, fmt.Errorf("unable "+
   241  							"to restore backups the "+
   242  							"second time: %v",
   243  							err)
   244  					}
   245  
   246  					return newNode, nil
   247  				}, nil
   248  			},
   249  		},
   250  
   251  		// Use the channel backup file that contains an unconfirmed
   252  		// channel and make sure recovery works as well.
   253  		{
   254  			name:            "restore unconfirmed channel file",
   255  			channelsUpdated: false,
   256  			initiator:       true,
   257  			private:         false,
   258  			unconfirmed:     true,
   259  			restoreMethod: func(oldNode *lntest.HarnessNode,
   260  				backupFilePath string,
   261  				mnemonic []string) (nodeRestorer, error) {
   262  
   263  				// Read the entire Multi backup stored within
   264  				// this node's channel.backup file.
   265  				multi, err := ioutil.ReadFile(backupFilePath)
   266  				if err != nil {
   267  					return nil, err
   268  				}
   269  
   270  				// Let's assume time passes and the channel
   271  				// confirms in the meantime, but for some reason
   272  				// the backup we made while it was still
   273  				// unconfirmed is the only backup we have. We
   274  				// should still be able to restore it. To
   275  				// simulate time passing, we mine some blocks
   276  				// to get the channel confirmed _after_ we saved
   277  				// the backup.
   278  				mineBlocks(t, net, 6, 1)
   279  
   280  				// In our nodeRestorer function, we'll restore
   281  				// the node from seed, then manually recover
   282  				// the channel backup.
   283  				return chanRestoreViaRPC(
   284  					net, password, mnemonic, multi, oldNode,
   285  				)
   286  			},
   287  		},
   288  
   289  		// Create a backup using RPC that contains an unconfirmed
   290  		// channel and make sure recovery works as well.
   291  		{
   292  			name:            "restore unconfirmed channel RPC",
   293  			channelsUpdated: false,
   294  			initiator:       true,
   295  			private:         false,
   296  			unconfirmed:     true,
   297  			restoreMethod: func(oldNode *lntest.HarnessNode,
   298  				backupFilePath string,
   299  				mnemonic []string) (nodeRestorer, error) {
   300  
   301  				// For this restoration method, we'll grab the
   302  				// current multi-channel backup from the old
   303  				// node. The channel should be included, even if
   304  				// it is not confirmed yet.
   305  				req := &lnrpc.ChanBackupExportRequest{}
   306  				chanBackup, err := oldNode.ExportAllChannelBackups(
   307  					ctxb, req,
   308  				)
   309  				if err != nil {
   310  					return nil, fmt.Errorf("unable to obtain "+
   311  						"channel backup: %v", err)
   312  				}
   313  				chanPoints := chanBackup.MultiChanBackup.ChanPoints
   314  				if len(chanPoints) == 0 {
   315  					return nil, fmt.Errorf("unconfirmed " +
   316  						"channel not included in backup")
   317  				}
   318  
   319  				// Let's assume time passes and the channel
   320  				// confirms in the meantime, but for some reason
   321  				// the backup we made while it was still
   322  				// unconfirmed is the only backup we have. We
   323  				// should still be able to restore it. To
   324  				// simulate time passing, we mine some blocks
   325  				// to get the channel confirmed _after_ we saved
   326  				// the backup.
   327  				mineBlocks(t, net, 6, 1)
   328  
   329  				// In our nodeRestorer function, we'll restore
   330  				// the node from seed, then manually recover
   331  				// the channel backup.
   332  				multi := chanBackup.MultiChanBackup.MultiChanBackup
   333  				return chanRestoreViaRPC(
   334  					net, password, mnemonic, multi, oldNode,
   335  				)
   336  			},
   337  		},
   338  
   339  		// Restore the backup from the on-disk file, using the RPC
   340  		// interface, for anchor commitment channels.
   341  		{
   342  			name:           "restore from backup file anchors",
   343  			initiator:      true,
   344  			private:        false,
   345  			commitmentType: lnrpc.CommitmentType_ANCHORS,
   346  			restoreMethod: func(oldNode *lntest.HarnessNode,
   347  				backupFilePath string,
   348  				mnemonic []string) (nodeRestorer, error) {
   349  
   350  				// Read the entire Multi backup stored within
   351  				// this node's channel.backup file.
   352  				multi, err := ioutil.ReadFile(backupFilePath)
   353  				if err != nil {
   354  					return nil, err
   355  				}
   356  
   357  				// Now that we have Dave's backup file, we'll
   358  				// create a new nodeRestorer that will restore
   359  				// using the on-disk channel.backup.
   360  				return chanRestoreViaRPC(
   361  					net, password, mnemonic, multi, oldNode,
   362  				)
   363  			},
   364  		},
   365  
   366  		// Restore the backup from the on-disk file, using the RPC
   367  		// interface, for script-enforced leased channels.
   368  		{
   369  			name:           "restore from backup file script enforced lease",
   370  			initiator:      true,
   371  			private:        false,
   372  			commitmentType: lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE,
   373  			restoreMethod: func(oldNode *lntest.HarnessNode,
   374  				backupFilePath string,
   375  				mnemonic []string) (nodeRestorer, error) {
   376  
   377  				// Read the entire Multi backup stored within
   378  				// this node's channel.backup file.
   379  				multi, err := ioutil.ReadFile(backupFilePath)
   380  				if err != nil {
   381  					return nil, err
   382  				}
   383  
   384  				// Now that we have Dave's backup file, we'll
   385  				// create a new nodeRestorer that will restore
   386  				// using the on-disk channel.backup.
   387  				return chanRestoreViaRPC(
   388  					net, password, mnemonic, multi, oldNode,
   389  				)
   390  			},
   391  		},
   392  
   393  		// Restore by also creating a channel with the legacy revocation
   394  		// producer format to make sure old SCBs can still be recovered.
   395  		{
   396  			name:             "old revocation producer format",
   397  			initiator:        true,
   398  			legacyRevocation: true,
   399  			restoreMethod: func(oldNode *lntest.HarnessNode,
   400  				backupFilePath string,
   401  				mnemonic []string) (nodeRestorer, error) {
   402  
   403  				// For this restoration method, we'll grab the
   404  				// current multi-channel backup from the old
   405  				// node, and use it to restore a new node
   406  				// within the closure.
   407  				req := &lnrpc.ChanBackupExportRequest{}
   408  				chanBackup, err := oldNode.ExportAllChannelBackups(
   409  					ctxb, req,
   410  				)
   411  				require.NoError(t.t, err)
   412  
   413  				multi := chanBackup.MultiChanBackup.MultiChanBackup
   414  
   415  				// In our nodeRestorer function, we'll restore
   416  				// the node from seed, then manually recover the
   417  				// channel backup.
   418  				return chanRestoreViaRPC(
   419  					net, password, mnemonic, multi, oldNode,
   420  				)
   421  			},
   422  		},
   423  
   424  		// Restore a channel that was force closed by Dave just before
   425  		// going offline.
   426  		{
   427  			name: "restore force closed from backup file " +
   428  				"anchors",
   429  			initiator:       true,
   430  			private:         false,
   431  			commitmentType:  lnrpc.CommitmentType_ANCHORS,
   432  			localForceClose: true,
   433  			restoreMethod: func(oldNode *lntest.HarnessNode,
   434  				backupFilePath string,
   435  				mnemonic []string) (nodeRestorer, error) {
   436  
   437  				// Read the entire Multi backup stored within
   438  				// this node's channel.backup file.
   439  				multi, err := ioutil.ReadFile(backupFilePath)
   440  				if err != nil {
   441  					return nil, err
   442  				}
   443  
   444  				// Now that we have Dave's backup file, we'll
   445  				// create a new nodeRestorer that will restore
   446  				// using the on-disk channel.backup.
   447  				return chanRestoreViaRPC(
   448  					net, password, mnemonic, multi, oldNode,
   449  				)
   450  			},
   451  		},
   452  	}
   453  
   454  	// TODO(roasbeef): online vs offline close?
   455  
   456  	// TODO(roasbeef): need to re-trigger the on-disk file once the node
   457  	// ann is updated?
   458  
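        	// Run each scenario as its own subtest, stopping at the first
        	// failure.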
   459  	for _, testCase := range testCases {
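        		// Capture the range variable so it can safely be used
        		// inside the subtest closure.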
   460  		testCase := testCase
   461  		success := t.t.Run(testCase.name, func(t *testing.T) {
   462  			h := newHarnessTest(t, net)
   463  
   464  			// Start each test with the default static fee estimate.
   465  			net.SetFeeEstimate(10000)
   466  
   467  			testChanRestoreScenario(h, net, &testCase, password)
   468  		})
   469  		if !success {
   470  			break
   471  		}
   472  	}
   473  }
   474  
   475  // testChannelBackupUpdates tests that both the streaming channel update RPC,
   476  // and the on-disk channel.backup are updated each time a channel is
   477  // opened/closed.
   478  func testChannelBackupUpdates(net *lntest.NetworkHarness, t *harnessTest) {
   479  	ctxb := context.Background()
   480  
   481  	// First, we'll make a temp directory that we'll use to store our
   482  	// backup file, so we can check in on it during the test easily.
   483  	backupDir, err := ioutil.TempDir("", "")
   484  	if err != nil {
   485  		t.Fatalf("unable to create backup dir: %v", err)
   486  	}
   487  	defer os.RemoveAll(backupDir)
   488  
   489  	// Next, we'll create a new node, Carol. We'll also create a temporary
   490  	// file that Carol will use to store her channel backups.
   491  	backupFilePath := filepath.Join(
   492  		backupDir, chanbackup.DefaultBackupFileName,
   493  	)
   494  	carolArgs := fmt.Sprintf("--backupfilepath=%v", backupFilePath)
   495  	carol := net.NewNode(t.t, "carol", []string{carolArgs})
   496  	defer shutdownAndAssert(net, t, carol)
   497  
   498  	// Next, we'll register for streaming notifications for changes to the
   499  	// backup file.
   500  	backupStream, err := carol.SubscribeChannelBackups(
   501  		ctxb, &lnrpc.ChannelBackupSubscription{},
   502  	)
   503  	if err != nil {
   504  		t.Fatalf("unable to create backup stream: %v", err)
   505  	}
   506  
   507  	// We'll use this goroutine to proxy any updates to a channel we can
   508  	// easily use below.
   509  	var wg sync.WaitGroup
   510  	backupUpdates := make(chan *lnrpc.ChanBackupSnapshot)
   511  	streamErr := make(chan error)
   512  	streamQuit := make(chan struct{})
   513  
   514  	wg.Add(1)
   515  	go func() {
   516  		defer wg.Done()
   517  		for {
   518  			snapshot, err := backupStream.Recv()
   519  			if err != nil {
   520  				select {
   521  				case streamErr <- err:
   522  				case <-streamQuit:
   523  					return
   524  				}
   525  			}
   526  
   527  			select {
   528  			case backupUpdates <- snapshot:
   529  			case <-streamQuit:
   530  				return
   531  			}
   532  		}
   533  	}()
   534  	defer close(streamQuit)
   535  
   536  	// With Carol up, we'll now connect her to Alice, and open a channel
   537  	// between them.
   538  	net.ConnectNodes(t.t, carol, net.Alice)
   539  
   540  	// Next, we'll open two channels between Alice and Carol back to back.
   541  	var chanPoints []*lnrpc.ChannelPoint
   542  	numChans := 2
   543  	chanAmt := dcrutil.Amount(1000000)
   544  	for i := 0; i < numChans; i++ {
   545  		chanPoint := openChannelAndAssert(
   546  			t, net, net.Alice, carol,
   547  			lntest.OpenChannelParams{Amt: chanAmt},
   548  		)
   549  
   550  		chanPoints = append(chanPoints, chanPoint)
   551  	}
   552  
   553  	// Using this helper function, we'll maintain a pointer to the latest
   554  	// channel backup so we can compare it to the on-disk state.
   555  	var currentBackup *lnrpc.ChanBackupSnapshot
   556  	assertBackupNtfns := func(numNtfns int) {
   557  		for i := 0; i < numNtfns; i++ {
   558  			select {
   559  			case err := <-streamErr:
   560  				t.Fatalf("error with backup stream: %v", err)
   561  
   562  			case currentBackup = <-backupUpdates:
   563  
   564  			case <-time.After(time.Second * 5):
   565  				t.Fatalf("didn't receive channel backup "+
   566  					"notification %v", i+1)
   567  			}
   568  		}
   569  	}
   570  
   571  	// assertBackupFileState is a helper function that we'll use to compare
   572  	// the on-disk backup file to our currentBackup pointer above.
   573  	assertBackupFileState := func() {
   574  		err := wait.NoError(func() error {
   575  			packedBackup, err := ioutil.ReadFile(backupFilePath)
   576  			if err != nil {
   577  				return fmt.Errorf("unable to read backup "+
   578  					"file: %v", err)
   579  			}
   580  
   581  			// As each backup file will be encrypted with a fresh
   582  			// nonce, we can't compare them directly, so instead
   583  			// we'll compare the length which is a proxy for the
   584  			// number of channels that the multi-backup contains.
   585  			rawBackup := currentBackup.MultiChanBackup.MultiChanBackup
   586  			if len(rawBackup) != len(packedBackup) {
   587  				return fmt.Errorf("backup files don't match: "+
   588  					"expected %x got %x", rawBackup, packedBackup)
   589  			}
   590  
   591  			// Additionally, we'll assert that both backups
   592  			// returned are valid.
   593  			for i, backup := range [][]byte{rawBackup, packedBackup} {
   594  				snapshot := &lnrpc.ChanBackupSnapshot{
   595  					MultiChanBackup: &lnrpc.MultiChanBackup{
   596  						MultiChanBackup: backup,
   597  					},
   598  				}
   599  				_, err := carol.VerifyChanBackup(ctxb, snapshot)
   600  				if err != nil {
   601  					return fmt.Errorf("unable to verify "+
   602  						"backup #%d: %v", i, err)
   603  				}
   604  			}
   605  
   606  			return nil
   607  		}, defaultTimeout)
   608  		if err != nil {
   609  			t.Fatalf("backup state invalid: %v", err)
   610  		}
   611  	}
   612  
   613  	// As these two channels were just opened, we should've gotten two
   614  	// pending and two open notifications for channel backups.
   615  	assertBackupNtfns(2 * 2)
   616  
   617  	// The on-disk file should also exactly match the latest backup that we
   618  	// have.
   619  	assertBackupFileState()
   620  
   621  	// Next, we'll close the channels one by one. After each channel
   622  	// closure, we should get a notification, and the on-disk state should
   623  	// match this state as well.
   624  	for i := 0; i < numChans; i++ {
   625  		// To ensure force closes also trigger an update, we'll force
   626  		// close half of the channels.
   627  		forceClose := i%2 == 0
   628  
   629  		chanPoint := chanPoints[i]
   630  
   631  		closeChannelAndAssert(t, net, net.Alice, chanPoint, forceClose)
   632  
   633  		// If we force closed the channel, then we'll mine enough
   634  		// blocks to ensure all outputs have been swept.
   635  		if forceClose {
   636  			// A local force closed channel will trigger a
   637  			// notification once the commitment TX confirms on
   638  			// chain. But that won't remove the channel from the
   639  			// backup just yet, that will only happen once the time
   640  			// locked contract was fully resolved on chain.
   641  			assertBackupNtfns(1)
   642  
   643  			cleanupForceClose(t, net, net.Alice, chanPoint)
   644  
   645  			// Now that the channel's been fully resolved, we expect
   646  			// another notification.
   647  			assertBackupNtfns(1)
   648  			assertBackupFileState()
   649  		} else {
   650  			// We should get a single notification after closing,
   651  			// and the on-disk state should match this latest
   652  			// notification.
   653  			assertBackupNtfns(1)
   654  			assertBackupFileState()
   655  		}
   656  	}
   657  }
   658  
   659  // testExportChannelBackup tests that we're able to properly export either a
   660  // targeted channel's backup, or export backups of all the current open
   661  // channels.
   662  func testExportChannelBackup(net *lntest.NetworkHarness, t *harnessTest) {
   663  	ctxb := context.Background()
   664  
   665  	// First, we'll create our primary test node: Carol. We'll use Carol to
   666  	// open channels and also export backups that we'll examine throughout
   667  	// the test.
   668  	carol := net.NewNode(t.t, "carol", nil)
   669  	defer shutdownAndAssert(net, t, carol)
   670  
   671  	// With Carol up, we'll now connect her to Alice, and open a channel
   672  	// between them.
   673  	net.ConnectNodes(t.t, carol, net.Alice)
   674  
   675  	// Next, we'll open two channels between Alice and Carol back to back.
   676  	var chanPoints []*lnrpc.ChannelPoint
   677  	numChans := 2
   678  	chanAmt := dcrutil.Amount(1000000)
   679  	for i := 0; i < numChans; i++ {
   680  		chanPoint := openChannelAndAssert(
   681  			t, net, net.Alice, carol,
   682  			lntest.OpenChannelParams{Amt: chanAmt},
   683  		)
   684  
   685  		chanPoints = append(chanPoints, chanPoint)
   686  	}
   687  
   688  	// Now that the channels are open, we should be able to fetch the
   689  	// backups of each of the channels.
   690  	for _, chanPoint := range chanPoints {
   691  		ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
   692  		req := &lnrpc.ExportChannelBackupRequest{
   693  			ChanPoint: chanPoint,
   694  		}
   695  		chanBackup, err := carol.ExportChannelBackup(ctxt, req)
   696  		if err != nil {
   697  			t.Fatalf("unable to fetch backup for channel %v: %v",
   698  				chanPoint, err)
   699  		}
   700  
   701  		// The returned backup should be fully populated. Since it's
   702  		// encrypted, we can't assert any more than that for now.
   703  		if len(chanBackup.ChanBackup) == 0 {
   704  			t.Fatalf("obtained empty backup for channel: %v", chanPoint)
   705  		}
   706  
   707  		// The specified chanPoint in the response should match our
   708  		// requested chanPoint.
   709  		if chanBackup.ChanPoint.String() != chanPoint.String() {
   710  			t.Fatalf("chanPoint mismatched: expected %v, got %v",
   711  				chanPoint.String(),
   712  				chanBackup.ChanPoint.String())
   713  		}
   714  	}
   715  
   716  	// Before we proceed, we'll make two utility methods we'll use below
   717  	// for our primary assertions.
   718  	assertNumSingleBackups := func(numSingles int) {
   719  		err := wait.NoError(func() error {
   720  			ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
   721  			req := &lnrpc.ChanBackupExportRequest{}
   722  			chanSnapshot, err := carol.ExportAllChannelBackups(
   723  				ctxt, req,
   724  			)
   725  			if err != nil {
   726  				return fmt.Errorf("unable to export channel "+
   727  					"backup: %v", err)
   728  			}
   729  
   730  			if chanSnapshot.SingleChanBackups == nil {
   731  				return fmt.Errorf("single chan backups not " +
   732  					"populated")
   733  			}
   734  
   735  			backups := chanSnapshot.SingleChanBackups.ChanBackups
   736  			if len(backups) != numSingles {
   737  				return fmt.Errorf("expected %v singles, "+
   738  					"got %v", numSingles, len(backups))
   739  			}
   740  
   741  			return nil
   742  		}, defaultTimeout)
   743  		if err != nil {
   744  			t.Fatalf(err.Error())
   745  		}
   746  	}
   747  	assertMultiBackupFound := func() func(bool, map[wire.OutPoint]struct{}) {
   748  		ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
   749  		req := &lnrpc.ChanBackupExportRequest{}
   750  		chanSnapshot, err := carol.ExportAllChannelBackups(ctxt, req)
   751  		if err != nil {
   752  			t.Fatalf("unable to export channel backup: %v", err)
   753  		}
   754  
   755  		return func(found bool, chanPoints map[wire.OutPoint]struct{}) {
   756  			switch {
   757  			case found && chanSnapshot.MultiChanBackup == nil:
   758  				t.Fatalf("multi-backup not present")
   759  
   760  			case !found && chanSnapshot.MultiChanBackup != nil &&
   761  				(len(chanSnapshot.MultiChanBackup.MultiChanBackup) !=
   762  					chanbackup.NilMultiSizePacked):
   763  
   764  				t.Fatalf("found multi-backup when none should " +
   765  					"be found")
   766  			}
   767  
   768  			if !found {
   769  				return
   770  			}
   771  
   772  			backedUpChans := chanSnapshot.MultiChanBackup.ChanPoints
   773  			if len(chanPoints) != len(backedUpChans) {
   774  				t.Fatalf("expected %v chans got %v", len(chanPoints),
   775  					len(backedUpChans))
   776  			}
   777  
   778  			for _, chanPoint := range backedUpChans {
   779  				wirePoint := rpcPointToWirePoint(t, chanPoint)
   780  				if _, ok := chanPoints[wirePoint]; !ok {
   781  					t.Fatalf("unexpected backup: %v", wirePoint)
   782  				}
   783  			}
   784  		}
   785  	}
   786  
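        	// Collect the outpoints of the channels we just opened so we can
        	// check that each one appears in the multi-channel backup below.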
   787  	chans := make(map[wire.OutPoint]struct{})
   788  	for _, chanPoint := range chanPoints {
   789  		chans[rpcPointToWirePoint(t, chanPoint)] = struct{}{}
   790  	}
   791  
   792  	// We should have exactly two single channel backups contained, and we
   793  	// should also have a multi-channel backup.
   794  	assertNumSingleBackups(2)
   795  	assertMultiBackupFound()(true, chans)
   796  
   797  	// We'll now close each channel one by one. After we close a channel,
   798  	// we shouldn't be able to find that channel in the backup anymore. We
   799  	// should also have one less single backup written to disk.
   800  	for i, chanPoint := range chanPoints {
   801  		closeChannelAndAssert(t, net, net.Alice, chanPoint, false)
   802  
   803  		assertNumSingleBackups(len(chanPoints) - i - 1)
   804  
   805  		delete(chans, rpcPointToWirePoint(t, chanPoint))
   806  		assertMultiBackupFound()(true, chans)
   807  	}
   808  
   809  	// At this point we shouldn't have any single or multi-chan backups at
   810  	// all.
   811  	assertNumSingleBackups(0)
   812  	assertMultiBackupFound()(false, nil)
   813  }
   814  
   815  // nodeRestorer is a function closure that allows each chanRestoreTestCase to
   816  // control exactly *how* the prior node is restored. This might be using a
   817  // backup obtained over RPC, the file system, etc.
   818  type nodeRestorer func() (*lntest.HarnessNode, error)
   819  
   820  // chanRestoreTestCase describes a test case for an end-to-end SCB restoration
   821  // workflow. One node will start from scratch using an existing SCB. At the
   822  // end of the test, both nodes should be made whole via the DLP protocol.
   823  type chanRestoreTestCase struct {
   824  	// name is the name of the target test case.
   825  	name string
   826  
   827  	// channelsUpdated, if false, means that no updates have taken
   828  	// place within the channel before the restore. Otherwise,
   829  	// HTLCs will be settled between the two parties before
   830  	// restoration, modifying the balance beyond the initial
   831  	// allocation.
   832  	channelsUpdated bool
   833  
   834  	// initiator signals if Dave should be the one that opens the
   835  	// channel to Alice, or if it should be the other way around.
   836  	initiator bool
   837  
   838  	// private signals if the channel from Dave to Carol should be
   839  	// private or not.
   840  	private bool
   841  
   842  	// unconfirmed signals if the channel from Dave to Carol should be
   843  	// left unconfirmed or not.
   844  	unconfirmed bool
   845  
   846  	// commitmentType specifies the commitment type that should be used for
   847  	// the channel from Dave to Carol.
   848  	commitmentType lnrpc.CommitmentType
   849  
   850  	// legacyRevocation signals if a channel with the legacy revocation
   851  	// producer format should also be created before restoring.
   852  	legacyRevocation bool
   853  
   854  	// localForceClose signals if the channel should be force closed by the
   855  	// node that is going to recover.
   856  	localForceClose bool
   857  
   858  	// restoreMethod takes an old node, then returns a function
   859  	// closure that'll return the same node, but with its state
   860  	// restored via a custom method. We use this to abstract away
   861  	// _how_ a node is restored from our assertions once the node
   862  	// has been fully restored itself.
   863  	restoreMethod func(oldNode *lntest.HarnessNode,
   864  		backupFilePath string,
   865  		mnemonic []string) (nodeRestorer, error)
   866  }
   867  
   868  // testChanRestoreScenario executes a chanRestoreTestCase from end to end,
   869  // ensuring that after Dave restores his channel state according to the
   870  // testCase, the DLP protocol is executed properly and both nodes are made
   871  // whole.
   872  func testChanRestoreScenario(t *harnessTest, net *lntest.NetworkHarness,
   873  	testCase *chanRestoreTestCase, password []byte) {
   874  
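        	// chanAmt is the channel capacity; half of it is pushed to the
        	// remote party so both sides start with a balance.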
   875  	const (
   876  		chanAmt = dcrutil.Amount(10000000)
   877  		pushAmt = dcrutil.Amount(5000000)
   878  	)
   879  
   880  	ctxb := context.Background()
   881  
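        	// Use short reconnection backoffs so the nodes re-establish their
        	// connections quickly after the restarts performed in this test.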
   882  	nodeArgs := []string{
   883  		"--minbackoff=50ms",
   884  		"--maxbackoff=1s",
   885  	}
   886  	if testCase.commitmentType != lnrpc.CommitmentType_UNKNOWN_COMMITMENT_TYPE {
   887  		args := nodeArgsForCommitType(testCase.commitmentType)
   888  		nodeArgs = append(nodeArgs, args...)
   889  	}
   890  
   891  	// First, we'll create a brand new node we'll use within the test. If
   892  	// we have a custom backup file specified, then we'll also create that
   893  	// for use.
   894  	dave, mnemonic, _, err := net.NewNodeWithSeed(
   895  		"dave", nodeArgs, password, false,
   896  	)
   897  	if err != nil {
   898  		t.Fatalf("unable to create new node: %v", err)
   899  	}
   900  	// Defer a closure instead of calling shutdownAndAssert directly, since
   901  	// the value of 'dave' changes throughout the test.
   902  	defer func() {
   903  		shutdownAndAssert(net, t, dave)
   904  	}()
   905  	carol := net.NewNode(t.t, "carol", nodeArgs)
   906  	defer shutdownAndAssert(net, t, carol)
   907  
   908  	// Now that our new nodes are created, we'll give them some coins for
   909  	// channel opening and anchor sweeping.
   910  	net.SendCoins(t.t, dcrutil.AtomsPerCoin, carol)
   911  
   912  	// For the anchor output case we need two UTXOs for Carol so she can
   913  	// sweep both the local and remote anchor.
   914  	if commitTypeHasAnchors(testCase.commitmentType) {
   915  		net.SendCoins(t.t, dcrutil.AtomsPerCoin, carol)
   916  	}
   917  
   918  	net.SendCoins(t.t, dcrutil.AtomsPerCoin, dave)
   919  
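        	// Pick the channel opener based on the test case: Dave funds the
        	// channel when he is the initiator, otherwise Carol does.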
   920  	var from, to *lntest.HarnessNode
   921  	if testCase.initiator {
   922  		from, to = dave, carol
   923  	} else {
   924  		from, to = carol, dave
   925  	}
   926  
   927  	// Next, we'll connect Dave to Carol, and open a new channel to her
   928  	// with a portion pushed.
   929  	net.ConnectNodes(t.t, dave, carol)
   930  
   931  	// We will either open a confirmed or unconfirmed channel, depending on
   932  	// the requirements of the test case.
   933  	var chanPoint *lnrpc.ChannelPoint
   934  	switch {
   935  	case testCase.unconfirmed:
   936  		_, err := net.OpenPendingChannel(
   937  			from, to, chanAmt, pushAmt,
   938  		)
   939  		if err != nil {
   940  			t.Fatalf("couldn't open pending channel: %v", err)
   941  		}
   942  
   943  		// Give the pubsub some time to update the channel backup.
   944  		err = wait.NoError(func() error {
   945  			fi, err := os.Stat(dave.ChanBackupPath())
   946  			if err != nil {
   947  				return err
   948  			}
   949  			if fi.Size() <= chanbackup.NilMultiSizePacked {
   950  				return fmt.Errorf("backup file empty")
   951  			}
   952  			return nil
   953  		}, defaultTimeout)
   954  		if err != nil {
   955  			t.Fatalf("channel backup not updated in time: %v", err)
   956  		}
   957  
   958  	// Also create channels with the legacy revocation producer format if
   959  	// requested.
   960  	case testCase.legacyRevocation:
   961  		createLegacyRevocationChannel(
   962  			net, t, chanAmt, pushAmt, from, to,
   963  		)
   964  
   965  	default:
   966  		var fundingShim *lnrpc.FundingShim
   967  		if testCase.commitmentType == lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE {
   968  			_, minerHeight, err := net.Miner.Node.GetBestBlock(testctx.New(t))
   969  			require.NoError(t.t, err)
   970  			thawHeight := uint32(minerHeight + 144)
   971  
   972  			fundingShim, _, _ = deriveFundingShim(
   973  				net, t, from, to, chanAmt, thawHeight, true,
   974  			)
   975  		}
   976  		chanPoint = openChannelAndAssert(
   977  			t, net, from, to, lntest.OpenChannelParams{
   978  				Amt:            chanAmt,
   979  				PushAmt:        pushAmt,
   980  				Private:        testCase.private,
   981  				FundingShim:    fundingShim,
   982  				CommitmentType: testCase.commitmentType,
   983  			},
   984  		)
   985  
   986  		// Wait for both sides to see the opened channel.
   987  		err = dave.WaitForNetworkChannelOpen(chanPoint)
   988  		if err != nil {
   989  			t.Fatalf("dave didn't report channel: %v", err)
   990  		}
   991  		err = carol.WaitForNetworkChannelOpen(chanPoint)
   992  		if err != nil {
   993  			t.Fatalf("carol didn't report channel: %v", err)
   994  		}
   995  	}
   996  
   997  	// If both parties should start with existing channel updates, then
   998  	// we'll send+settle an HTLC between 'from' and 'to' now.
   999  	if testCase.channelsUpdated {
  1000  		ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
  1001  
  1002  		invoice := &lnrpc.Invoice{
  1003  			Memo:  "testing",
  1004  			Value: 100000,
  1005  		}
  1006  		invoiceResp, err := to.AddInvoice(ctxt, invoice)
  1007  		if err != nil {
  1008  			t.Fatalf("unable to add invoice: %v", err)
  1009  		}
  1010  
  1011  		err = completePaymentRequests(
  1012  			from, from.RouterClient,
  1013  			[]string{invoiceResp.PaymentRequest}, true,
  1014  		)
  1015  		if err != nil {
  1016  			t.Fatalf("unable to complete payments: %v", err)
  1017  		}
  1018  
  1019  		// Ensure the commitments are actually updated and no HTLCs
  1020  		// remain active.
  1021  		err = wait.NoError(func() error {
  1022  			return assertNumActiveHtlcs([]*lntest.HarnessNode{to, from}, 0)
  1023  		}, defaultTimeout)
  1024  		if err != nil {
  1025  			t.Fatalf("node still has active HTLCs: %v", err)
  1026  		}
  1027  	}
  1028  
  1029  	// If we're testing that locally force closed channels can be restored
  1030  	// then we issue the force close now.
  1031  	if testCase.localForceClose && chanPoint != nil {
  1032  		ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
  1033  		defer cancel()
  1034  
  1035  		_, err = dave.CloseChannel(ctxt, &lnrpc.CloseChannelRequest{
  1036  			ChannelPoint: chanPoint,
  1037  			Force:        true,
  1038  		})
  1039  		require.NoError(t.t, err)
  1040  
  1041  		// After closing the channel we mine one transaction to make
  1042  		// sure the commitment TX was confirmed.
  1043  		_ = mineBlocks(t, net, 1, 1)
  1044  
  1045  		// Now we need to make sure that the channel is still in the
  1046  		// backup. Otherwise restoring won't work later.
  1047  		_, err = dave.ExportChannelBackup(
  1048  			ctxt, &lnrpc.ExportChannelBackupRequest{
  1049  				ChanPoint: chanPoint,
  1050  			},
  1051  		)
  1052  		require.NoError(t.t, err)
  1053  	}
  1054  
  1055  	// Before we start the recovery, we'll record the balances of both
  1056  	// Carol and Dave to ensure they both sweep their coins at the end.
  1057  	balReq := &lnrpc.WalletBalanceRequest{}
  1058  	ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
  1059  	carolBalResp, err := carol.WalletBalance(ctxt, balReq)
  1060  	if err != nil {
  1061  		t.Fatalf("unable to get carol's balance: %v", err)
  1062  	}
  1063  	carolStartingBalance := carolBalResp.ConfirmedBalance
  1064  
  1065  	daveBalance, err := dave.WalletBalance(ctxt, balReq)
  1066  	if err != nil {
  1067  		t.Fatalf("unable to get dave's balance: %v", err)
  1068  	}
  1069  	daveStartingBalance := daveBalance.ConfirmedBalance
  1070  
  1071  	// At this point, we'll now execute the restore method to give us the
  1072  	// new node we should attempt our assertions against.
  1073  	backupFilePath := dave.ChanBackupPath()
  1074  	restoredNodeFunc, err := testCase.restoreMethod(
  1075  		dave, backupFilePath, mnemonic,
  1076  	)
  1077  	if err != nil {
  1078  		t.Fatalf("unable to prep node restoration: %v", err)
  1079  	}
  1080  
  1081  	// Now that we're able to make our restored node, we'll shut down the
  1082  	// old Dave node as we'll be replacing it shortly below.
  1083  	shutdownAndAssert(net, t, dave)
  1084  
  1088  	// To make sure the channel state is advanced correctly if the channel
  1089  	// peer is not online at first, we also shutdown Carol.
  1090  	restartCarol, err := net.SuspendNode(carol)
  1091  	require.NoError(t.t, err)
  1092  
  1093  	// Next, we'll make a new Dave and start the bulk of our recovery
  1094  	// workflow.
  1095  	dave, err = restoredNodeFunc()
  1096  	if err != nil {
  1097  		t.Fatalf("unable to restore node: %v", err)
  1098  	}
  1099  
  1100  	// First ensure that the on-chain balance is restored.
  1101  	err = wait.NoError(func() error {
  1102  		ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
  1103  		balReq := &lnrpc.WalletBalanceRequest{}
  1104  		daveBalResp, err := dave.WalletBalance(ctxt, balReq)
  1105  		if err != nil {
  1106  			return err
  1107  		}
  1108  
  1109  		daveBal := daveBalResp.ConfirmedBalance
  1110  		if daveBal <= 0 {
  1111  			return fmt.Errorf("expected positive balance, had %v",
  1112  				daveBal)
  1113  		}
  1114  
  1115  		return nil
  1116  	}, defaultTimeout)
  1117  	if err != nil {
  1118  		t.Fatalf("On-chain balance not restored: %v", err)
  1119  	}
  1120  
  1121  	// For our force close scenario we don't need the channel to be closed
  1122  	// by Carol since it was already force closed before we started the
  1123  	// recovery. All we need is for Carol to send us the commit height
  1124  	// so we can sweep the time locked output with the correct commit point.
  1125  	if testCase.localForceClose {
  1126  		assertNumPendingChannels(t, dave, 0, 1, 0, 0)
  1127  
  1128  		err = restartCarol()
  1129  		require.NoError(t.t, err)
  1130  
  1131  		// Now that we have our new node up, we expect that it'll
  1132  		// re-connect to Carol automatically based on the restored
  1133  		// backup.
  1134  		net.EnsureConnected(t.t, dave, carol)
  1135  
  1136  		assertTimeLockSwept(
  1137  			net, t, carol, carolStartingBalance, dave,
  1138  			daveStartingBalance,
  1139  			commitTypeHasAnchors(testCase.commitmentType),
  1140  		)
  1141  
  1142  		return
  1143  	}
  1144  
  1145  	// We now check that the restored channel is in the proper state. It
  1146  	// should not yet be force closing, as no connection with the remote
  1147  	// peer has been established. We should also not be able to close the
  1148  	// channel.
  1149  	assertNumPendingChannels(t, dave, 1, 0, 0, 0)
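        	// Query the pending channels so we can reference the restored
        	// channel's outpoint when attempting a manual force close below.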
  1150  	ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
  1151  	defer cancel()
  1152  	pendingChanResp, err := dave.PendingChannels(
  1153  		ctxt, &lnrpc.PendingChannelsRequest{},
  1154  	)
  1155  	require.NoError(t.t, err)
  1156  
  1157  	// We now need to make sure the server is fully started before we can
  1158  	// actually close the channel. The channel point check is the first one
  1159  	// in CloseChannel, so we can call it with a nil channel point until we
  1160  	// get that specific error, which tells us Dave is fully started.
  1161  	err = wait.Predicate(func() bool {
  1162  		const expectedErr = "must specify channel point"
  1163  		ctxc, cancel := context.WithCancel(ctxt)
  1164  		defer cancel()
  1165  
  1166  		resp, err := dave.CloseChannel(
  1167  			ctxc, &lnrpc.CloseChannelRequest{},
  1168  		)
  1169  		if err != nil {
  1170  			return false
  1171  		}
  1172  
  1173  		defer func() { _ = resp.CloseSend() }()
  1174  
  1175  		_, err = resp.Recv()
  1176  		if err != nil && strings.Contains(err.Error(), expectedErr) {
  1177  			return true
  1178  		}
  1179  
  1180  		return false
  1181  	}, defaultTimeout)
  1182  	require.NoError(t.t, err)
  1183  
  1184  	// We also want to make sure we cannot force close in this state. That
  1185  	// would get the state machine in a weird state.
  1186  	chanPointParts := strings.Split(
  1187  		pendingChanResp.WaitingCloseChannels[0].Channel.ChannelPoint,
  1188  		":",
  1189  	)
  1190  	chanPointIndex, _ := strconv.ParseUint(chanPointParts[1], 10, 32)
  1191  	resp, err := dave.CloseChannel(ctxt, &lnrpc.CloseChannelRequest{
  1192  		ChannelPoint: &lnrpc.ChannelPoint{
  1193  			FundingTxid: &lnrpc.ChannelPoint_FundingTxidStr{
  1194  				FundingTxidStr: chanPointParts[0],
  1195  			},
  1196  			OutputIndex: uint32(chanPointIndex),
  1197  		},
  1198  		Force: true,
  1199  	})
  1200  
  1201  	// We don't get an error directly but only when reading the first
  1202  	// message of the stream.
  1203  	require.NoError(t.t, err)
  1204  	_, err = resp.Recv()
  1205  	require.Error(t.t, err)
  1206  	require.Contains(t.t, err.Error(), "cannot close channel with state: ")
  1207  	require.Contains(t.t, err.Error(), "ChanStatusRestored")
  1208  
  1209  	// Increase the fee estimate so that the following force close tx will
  1210  	// be cpfp'ed. This needs to be chosen at the correct rate so that the
  1211  	// anchor commitment is still broadcast.
  1212  	net.SetFeeEstimate(12500)
  1213  
  1214  	// Now that we have ensured that the channels restored by the backup are
  1215  	// in the correct state even without the remote peer telling us so,
  1216  	// let's start up Carol again.
  1217  	err = restartCarol()
  1218  	require.NoError(t.t, err)
  1219  
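        	// Carol should have one confirmed UTXO, plus an extra one when
        	// anchor channels are used so she can sweep both the local and
        	// remote anchor.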
  1220  	numUTXOs := 1
  1221  	if commitTypeHasAnchors(testCase.commitmentType) {
  1222  		numUTXOs = 2
  1223  	}
  1224  	assertNumUTXOs(t.t, carol, numUTXOs)
  1225  
  1226  	// Now that we have our new node up, we expect that it'll re-connect to
  1227  	// Carol automatically based on the restored backup.
  1228  	net.EnsureConnected(t.t, dave, carol)
  1229  
  1230  	// Leave enough time for the sweep txs to be generated and broadcast
  1231  	// using the correct fee estimate.
  1232  	time.Sleep(sweep.DefaultBatchWindowDuration + time.Second)
  1233  
  1234  	// TODO(roasbeef): move dave restarts?
  1235  
  1236  	// Now we'll assert that both sides properly execute the DLP protocol.
  1237  	// We grab their balances now to ensure that they're made whole at the
  1238  	// end of the protocol.
  1239  	assertDLPExecuted(
  1240  		net, t, carol, carolStartingBalance, dave, daveStartingBalance,
  1241  		testCase.commitmentType,
  1242  	)
  1243  }
  1244  
  1245  // createLegacyRevocationChannel creates a single channel using the legacy
  1246  // revocation producer format by using PSBT to signal a special pending channel
  1247  // ID.
  1248  func createLegacyRevocationChannel(net *lntest.NetworkHarness, t *harnessTest,
  1249  	chanAmt, pushAmt dcrutil.Amount, from, to *lntest.HarnessNode) {
  1250  
  1251  	t.Skipf("Test disabled until support for PSBTs is added")
  1252  
  1253  	ctxb := context.Background()
  1254  
  1255  	// We'll signal to the wallet that we also want to create a channel with
  1256  	// the legacy revocation producer format that relies on deriving a
  1257  	// private key from the key ring. This is only available during itests
  1258  	// to make sure we don't hard depend on the DerivePrivKey method of the
  1259  	// key ring. We can signal the wallet by setting a custom pending
  1260  	// channel ID. To be able to do that, we need to set a funding shim
  1261  	// which is easiest by using PSBT funding. The ID is the hex
  1262  	// representation of the string "legacy-revocation".
  1263  	itestLegacyFormatChanID := [32]byte{
  1264  		0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x2d, 0x72, 0x65, 0x76,
  1265  		0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
  1266  	}
  1267  	ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
  1268  	defer cancel()
  1269  	openChannelReq := lntest.OpenChannelParams{
  1270  		Amt:     chanAmt,
  1271  		PushAmt: pushAmt,
  1272  		FundingShim: &lnrpc.FundingShim{
  1273  			Shim: &lnrpc.FundingShim_PsbtShim{
  1274  				PsbtShim: &lnrpc.PsbtShim{
  1275  					PendingChanId: itestLegacyFormatChanID[:],
  1276  				},
  1277  			},
  1278  		},
  1279  	}
  1280  	chanUpdates, tempPsbt, err := openChannelPsbt(
  1281  		ctxt, from, to, openChannelReq,
  1282  	)
  1283  	require.NoError(t.t, err)
  1284  
  1285  	// Fund the PSBT by using the source node's wallet.
  1286  	ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
  1287  	defer cancel()
  1288  	fundReq := &walletrpc.FundPsbtRequest{
  1289  		Template: &walletrpc.FundPsbtRequest_Psbt{
  1290  			Psbt: tempPsbt,
  1291  		},
  1292  		Fees: &walletrpc.FundPsbtRequest_AtomsPerByte{
  1293  			AtomsPerByte: 2,
  1294  		},
  1295  	}
  1296  	fundResp, err := from.WalletKitClient.FundPsbt(ctxt, fundReq)
  1297  	require.NoError(t.t, err)
  1298  
  1299  	// We have a PSBT that has no witness data yet, which is exactly what we
  1300  	// need for the next step of verifying the PSBT with the funding intents.
  1301  	_, err = from.FundingStateStep(ctxb, &lnrpc.FundingTransitionMsg{
  1302  		Trigger: &lnrpc.FundingTransitionMsg_PsbtVerify{
  1303  			PsbtVerify: &lnrpc.FundingPsbtVerify{
  1304  				PendingChanId: itestLegacyFormatChanID[:],
  1305  				FundedPsbt:    fundResp.FundedPsbt,
  1306  			},
  1307  		},
  1308  	})
  1309  	require.NoError(t.t, err)
  1310  
  1311  	// Now we'll ask the source node's wallet to sign the PSBT so we can
  1312  	// finish the funding flow.
  1313  	ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
  1314  	defer cancel()
  1315  	finalizeReq := &walletrpc.FinalizePsbtRequest{
  1316  		FundedPsbt: fundResp.FundedPsbt,
  1317  	}
  1318  	finalizeRes, err := from.WalletKitClient.FinalizePsbt(
  1319  		ctxt, finalizeReq,
  1320  	)
  1321  	require.NoError(t.t, err)
  1322  
  1323  	// We've signed our PSBT now, let's pass it to the intent again.
  1324  	_, err = from.FundingStateStep(ctxb, &lnrpc.FundingTransitionMsg{
  1325  		Trigger: &lnrpc.FundingTransitionMsg_PsbtFinalize{
  1326  			PsbtFinalize: &lnrpc.FundingPsbtFinalize{
  1327  				PendingChanId: itestLegacyFormatChanID[:],
  1328  				SignedPsbt:    finalizeRes.SignedPsbt,
  1329  			},
  1330  		},
  1331  	})
  1332  	require.NoError(t.t, err)
  1333  
  1334  	// Consume the "channel pending" update. This waits until the funding
  1335  	// transaction has been fully compiled.
  1336  	ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
  1337  	defer cancel()
  1338  	updateResp, err := receiveChanUpdate(ctxt, chanUpdates)
  1339  	require.NoError(t.t, err)
  1340  	upd, ok := updateResp.Update.(*lnrpc.OpenStatusUpdate_ChanPending)
  1341  	require.True(t.t, ok)
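        	// Build the channel point of the pending channel from the update
        	// so it can be tracked below.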
  1342  	chanPoint := &lnrpc.ChannelPoint{
  1343  		FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
  1344  			FundingTxidBytes: upd.ChanPending.Txid,
  1345  		},
  1346  		OutputIndex: upd.ChanPending.OutputIndex,
  1347  	}
  1348  
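        	// Mine enough blocks for the funding transaction to confirm, then
        	// wait for both nodes to see the channel as open.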
  1349  	_ = mineBlocks(t, net, 6, 1)
  1350  	err = from.WaitForNetworkChannelOpen(chanPoint)
  1351  	require.NoError(t.t, err)
  1352  	err = to.WaitForNetworkChannelOpen(chanPoint)
  1353  	require.NoError(t.t, err)
  1354  }
  1355  
  1356  // chanRestoreViaRPC is a helper test method that returns a nodeRestorer
  1357  // instance which will restore the target node from a password+seed, then
  1358  // trigger an SCB restore using the RPC interface.
  1359  func chanRestoreViaRPC(net *lntest.NetworkHarness, password []byte,
  1360  	mnemonic []string, multi []byte,
  1361  	oldNode *lntest.HarnessNode) (nodeRestorer, error) {
  1362  
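        	// Wrap the raw multi-channel backup so it can be passed to the
        	// RestoreChannelBackups RPC once the node has been re-created.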
  1363  	backup := &lnrpc.RestoreChanBackupRequest_MultiChanBackup{
  1364  		MultiChanBackup: multi,
  1365  	}
  1366  
  1367  	ctxb := context.Background()
  1368  
  1369  	return func() (*lntest.HarnessNode, error) {
  1370  		newNode, err := net.RestoreNodeWithSeed(
  1371  			"dave", nil, password, mnemonic, "", 1000, nil,
  1372  			copyPorts(oldNode),
  1373  		)
  1374  		if err != nil {
  1375  			return nil, fmt.Errorf("unable to "+
  1376  				"restore node: %v", err)
  1377  		}
  1378  
  1379  		_, err = newNode.RestoreChannelBackups(
  1380  			ctxb, &lnrpc.RestoreChanBackupRequest{
  1381  				Backup: backup,
  1382  			},
  1383  		)
  1384  		if err != nil {
  1385  			return nil, fmt.Errorf("unable "+
  1386  				"to restore backups: %v", err)
  1387  		}
  1388  
  1389  		return newNode, nil
  1390  	}, nil
  1391  }
  1392  
  1393  // copyPorts returns a node option function that copies the ports of an existing
  1394  // node over to the newly created one.
  1395  func copyPorts(oldNode *lntest.HarnessNode) lntest.NodeOption {
  1396  	return func(cfg *lntest.BaseNodeConfig) {
  1397  		cfg.P2PPort = oldNode.Cfg.P2PPort
  1398  		cfg.RPCPort = oldNode.Cfg.RPCPort
  1399  		cfg.RESTPort = oldNode.Cfg.RESTPort
  1400  		cfg.ProfilePort = oldNode.Cfg.ProfilePort
  1401  	}
  1402  }
  1403  
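        // rpcPointToWirePoint converts an lnrpc.ChannelPoint into its wire.OutPoint
        // representation, failing the test if the channel point cannot be parsed.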
  1404  func rpcPointToWirePoint(t *harnessTest,
  1405  	chanPoint *lnrpc.ChannelPoint) wire.OutPoint {
  1406  
  1407  	op, err := lntest.MakeOutpoint(chanPoint)
  1408  	require.NoError(t.t, err, "unable to get txid")
  1409  
  1410  	return op
  1411  }