github.com/decred/dcrlnd@v0.7.6/lntest/harness_net.go

     1  package lntest
     2  
     3  import (
     4  	"context"
     5  	"encoding/hex"
     6  	"errors"
     7  	"fmt"
     8  	"io"
     9  	"io/ioutil"
    10  	"net/http"
    11  	"os"
    12  	"path/filepath"
    13  	"strings"
    14  	"sync"
    15  	"testing"
    16  	"time"
    17  
    18  	"github.com/decred/dcrlnd"
    19  	"github.com/decred/dcrlnd/input"
    20  	"github.com/decred/dcrlnd/kvdb/etcd"
    21  	"github.com/decred/dcrlnd/lnrpc"
    22  	"github.com/decred/dcrlnd/lntest/wait"
    23  	"github.com/decred/dcrlnd/lnwallet/chainfee"
    24  	"github.com/decred/dcrlnd/lnwire"
    25  	"github.com/stretchr/testify/require"
    26  	"golang.org/x/sync/errgroup"
    27  
    28  	"github.com/decred/dcrd/chaincfg/chainhash"
    29  	"github.com/decred/dcrd/chaincfg/v3"
    30  	"github.com/decred/dcrd/dcrutil/v4"
    31  	"github.com/decred/dcrd/txscript/v4/stdaddr"
    32  	"github.com/decred/dcrd/wire"
    33  	"google.golang.org/grpc/grpclog"
    34  )
    35  
    36  // DefaultCSV is the CSV delay (remotedelay) we will start our test nodes with.
    37  const DefaultCSV = 4
    38  
    39  // NodeOption is a function for updating a node's configuration.
    40  type NodeOption func(*BaseNodeConfig)
    41  
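        // The following is a minimal, illustrative sketch of a NodeOption. The
        // helper name is hypothetical; it simply appends a single extra command
        // line argument to the node's configuration via the ExtraArgs field.
        func withExtraArg(arg string) NodeOption {
        	return func(cfg *BaseNodeConfig) {
        		cfg.ExtraArgs = append(cfg.ExtraArgs, arg)
        	}
        }
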
    42  // NetworkHarness is an integration testing harness for the lightning network.
    43  // Building on top of HarnessNode, it is responsible for handling interactions
    44  // among different nodes. The harness by default is created with two active
    45  // nodes on the network:
    46  // Alice and Bob.
    47  type NetworkHarness struct {
    48  	netParams *chaincfg.Params
    49  
    50  	// currentTestCase holds the name of the currently running test case.
    51  	currentTestCase string
    52  
    53  	// lndBinary is the full path to the lnd binary that was specifically
    54  	// compiled with all required itest flags.
    55  	lndBinary string
    56  
    57  	// Miner is a reference to a running full node that can be used to
    58  	// create new blocks on the network.
    59  	Miner *HarnessMiner
    60  
    61  	// BackendCfg houses the information necessary to use a node as LND
    62  	// chain backend, such as rpc configuration, P2P information etc.
    63  	BackendCfg BackendConfig
    64  
    65  	activeNodes map[int]*HarnessNode
    66  
    67  	nodesByPub map[string]*HarnessNode
    68  
    69  	// Alice and Bob are the initial seeder nodes that are automatically
    70  	// created to be the initial participants of the test network.
    71  	Alice *HarnessNode
    72  	Bob   *HarnessNode
    73  
    74  	// dbBackend sets the database backend to use.
    75  	dbBackend DatabaseBackend
    76  
    77  	// Channel for transmitting stderr output from failed lightning node
    78  	// to main process.
    79  	lndErrorChan chan error
    80  
    81  	// feeService is a web service that provides external fee estimates to
    82  	// lnd.
    83  	feeService *feeService
    84  
    85  	// runCtx is a context with cancel method. It's used to signal when the
    86  	// node needs to quit, and used as the parent context when spawning
    87  	// child contexts for RPC requests.
    88  	runCtx context.Context
    89  	cancel context.CancelFunc
    90  
    91  	mtx sync.Mutex
    92  }
    93  
    94  // NewNetworkHarness creates a new network test harness.
    95  // TODO(roasbeef): add option to use golang's build library to build a binary
    96  // of the current repo. This will save developers from having to manually
    97  // `go install` within the repo each time before testing changes.
    98  func NewNetworkHarness(m *HarnessMiner, b BackendConfig, lndBinary string,
    99  	dbBackend DatabaseBackend) (*NetworkHarness, error) {
   100  
   101  	feeService := startFeeService()
   102  
   103  	ctxt, cancel := context.WithCancel(context.Background())
   104  
   105  	n := NetworkHarness{
   106  		activeNodes:  make(map[int]*HarnessNode),
   107  		nodesByPub:   make(map[string]*HarnessNode),
   108  		lndErrorChan: make(chan error),
   109  		netParams:    m.ActiveNet,
   110  		Miner:        m,
   111  		BackendCfg:   b,
   112  		feeService:   feeService,
   113  		runCtx:       ctxt,
   114  		cancel:       cancel,
   115  		lndBinary:    lndBinary,
   116  		dbBackend:    dbBackend,
   117  	}
   118  	return &n, nil
   119  }
   120  
   121  // LookUpNodeByPub queries the set of active nodes to locate a node according
   122  // to its public key. The second value will be true if the node was found, and
   123  // false otherwise.
   124  func (n *NetworkHarness) LookUpNodeByPub(pubStr string) (*HarnessNode, error) {
   125  	n.mtx.Lock()
   126  	defer n.mtx.Unlock()
   127  
   128  	node, ok := n.nodesByPub[pubStr]
   129  	if !ok {
   130  		return nil, fmt.Errorf("unable to find node")
   131  	}
   132  
   133  	return node, nil
   134  }
   135  
   136  // ProcessErrors returns a channel used for reporting any fatal process errors.
   137  // If any of the active nodes within the harness' test network incur a fatal
   138  // error, that error is sent over this channel.
   139  func (n *NetworkHarness) ProcessErrors() <-chan error {
   140  	return n.lndErrorChan
   141  }
   142  
   143  // SetUp starts the initial seeder nodes within the test harness. The initial
   144  // nodes' wallets will be funded with ten 1 DCR outputs each. Finally,
   145  // rpc clients capable of communicating with the initial seeder nodes are
   146  // created. Nodes are initialized with the given extra command line flags, which
   147  // should be formatted properly - "--arg=value".
   148  func (n *NetworkHarness) SetUp(t *testing.T,
   149  	testCase string, lndArgs []string) error {
   150  
   151  	// Swap out grpc's default logger with our fake logger, which drops the
   152  	// statements on the floor.
   153  	fakeLogger := grpclog.NewLoggerV2(io.Discard, io.Discard, io.Discard)
   154  	grpclog.SetLoggerV2(fakeLogger)
   155  	n.currentTestCase = testCase
   156  
   157  	// Start the initial seeder nodes within the test network, then connect
   158  	// their respective RPC clients.
   159  	eg := errgroup.Group{}
   160  	eg.Go(func() error {
   161  		var err error
   162  		n.Alice, err = n.newNode(
   163  			"Alice", lndArgs, false, nil, n.dbBackend, true,
   164  		)
   165  		return err
   166  	})
   167  	eg.Go(func() error {
   168  		var err error
   169  		n.Bob, err = n.newNode(
   170  			"Bob", lndArgs, false, nil, n.dbBackend, true,
   171  		)
   172  		return err
   173  	})
   174  	require.NoError(t, eg.Wait())
   175  
   176  	// First, make a connection between the two nodes. This will wait until
   177  	// both nodes are fully started since the Connect RPC is guarded behind
   178  	// the server.Started() flag that waits for all subsystems to be ready.
   179  	n.ConnectNodes(t, n.Alice, n.Bob)
   180  
   181  	// Load up the wallets of the seeder nodes with 10 outputs of 1 DCR
   182  	// each.
   183  	addrReq := &lnrpc.NewAddressRequest{
   184  		Type: lnrpc.AddressType_PUBKEY_HASH,
   185  	}
   186  	clients := []lnrpc.LightningClient{n.Alice, n.Bob}
   187  	for _, client := range clients {
   188  
   189  		// Generate 10 addresses first, then send the outputs in a separate
   190  		// loop to prevent triggering dcrwallet's #1372 deadlock condition.
   191  		nbOutputs := 10
   192  		scripts := make([][]byte, nbOutputs)
   193  		for i := 0; i < nbOutputs; i++ {
   194  			resp, err := client.NewAddress(n.runCtx, addrReq)
   195  			if err != nil {
   196  				return err
   197  			}
   198  			addr, err := stdaddr.DecodeAddress(resp.Address, n.netParams)
   199  			if err != nil {
   200  				return err
   201  			}
   202  			addrScript, err := input.PayToAddrScript(addr)
   203  			if err != nil {
   204  				return err
   205  			}
   206  
   207  			scripts[i] = addrScript
   208  		}
   209  
   210  		// Wait a bit before sending, to allow the wallet to lock the address
   211  		// manager and not trigger #1372.
   212  		time.Sleep(time.Millisecond * 100)
   213  
   214  		// Send an output to each address.
   215  		for i := 0; i < nbOutputs; i++ {
   216  			output := &wire.TxOut{
   217  				PkScript: scripts[i],
   218  				Value:    dcrutil.AtomsPerCoin,
   219  			}
   220  
   221  			_, err := n.Miner.SendOutputs(n.runCtx, []*wire.TxOut{output}, 7500)
   222  			if err != nil {
   223  				return err
   224  			}
   225  		}
   226  	}
   227  
   228  	// We generate several blocks in order to give the outputs created
   229  	// above a good number of confirmations.
   230  	if _, err := n.Generate(10); err != nil {
   231  		return err
   232  	}
   233  
   234  	// Now we want to wait for the nodes to catch up.
   235  	if err := n.Alice.WaitForBlockchainSync(); err != nil {
   236  		return err
   237  	}
   238  	if err := n.Bob.WaitForBlockchainSync(); err != nil {
   239  		return err
   240  	}
   241  
   242  	// Now block until both wallets have fully synced up.
   243  	expectedBalance := int64(dcrutil.AtomsPerCoin * 10)
   244  	balReq := &lnrpc.WalletBalanceRequest{}
   245  	balanceTicker := time.NewTicker(time.Millisecond * 200)
   246  	defer balanceTicker.Stop()
   247  	balanceTimeout := time.After(DefaultTimeout)
   248  out:
   249  	for {
   250  		select {
   251  		case <-balanceTicker.C:
   252  			aliceResp, err := n.Alice.WalletBalance(n.runCtx, balReq)
   253  			if err != nil {
   254  				return err
   255  			}
   256  			bobResp, err := n.Bob.WalletBalance(n.runCtx, balReq)
   257  			if err != nil {
   258  				return err
   259  			}
   260  
   261  			if aliceResp.ConfirmedBalance == expectedBalance &&
   262  				bobResp.ConfirmedBalance == expectedBalance {
   263  				break out
   264  			}
   265  		case <-balanceTimeout:
   266  			return fmt.Errorf("balances not synced after deadline")
   267  		}
   268  	}
   269  
   270  	return nil
   271  }
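
        // The sketch below shows one way the harness lifecycle described above can
        // be driven from a test. It is illustrative only: the miner, backend config
        // and binary path are assumed to be provided by the caller, the bbolt
        // backend is an arbitrary choice, and the helper name is hypothetical.
        func exampleHarnessLifecycle(t *testing.T, miner *HarnessMiner,
        	backend BackendConfig, lndBinary string) {

        	harness, err := NewNetworkHarness(miner, backend, lndBinary, BackendBbolt)
        	require.NoError(t, err)

        	// Tear down all active nodes and stop the harness once the test is done.
        	defer harness.Stop()
        	defer func() {
        		require.NoError(t, harness.TearDown())
        	}()

        	// SetUp starts Alice and Bob, connects them and funds both wallets.
        	err = harness.SetUp(t, "example-test", nil)
        	require.NoError(t, err)
        }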
   272  
   273  // TearDown tears down all active nodes within the test lightning network.
   274  func (n *NetworkHarness) TearDown() error {
   275  	for _, node := range n.activeNodes {
   276  		if err := n.ShutdownNode(node); err != nil {
   277  			return err
   278  		}
   279  	}
   280  
   281  	return nil
   282  }
   283  
   284  // Stop stops the test harness.
   285  func (n *NetworkHarness) Stop() {
   286  	close(n.lndErrorChan)
   287  	n.cancel()
   288  
   289  	n.feeService.stop()
   290  }
   291  
   292  // extraArgsEtcd returns extra args for configuring LND to use an external etcd
   293  // database (for remote channel DB and wallet DB).
   294  func extraArgsEtcd(etcdCfg *etcd.Config, name string, cluster bool,
   295  	leaderSessionTTL int) []string {
   296  
   297  	extraArgs := []string{
   298  		"--db.backend=etcd",
   299  		fmt.Sprintf("--db.etcd.host=%v", etcdCfg.Host),
   300  		fmt.Sprintf("--db.etcd.user=%v", etcdCfg.User),
   301  		fmt.Sprintf("--db.etcd.pass=%v", etcdCfg.Pass),
   302  		fmt.Sprintf("--db.etcd.namespace=%v", etcdCfg.Namespace),
   303  	}
   304  
   305  	if etcdCfg.InsecureSkipVerify {
   306  		extraArgs = append(extraArgs, "--db.etcd.insecure_skip_verify")
   307  
   308  	}
   309  
   310  	if cluster {
   311  		clusterArgs := []string{
   312  			"--cluster.enable-leader-election",
   313  			fmt.Sprintf("--cluster.id=%v", name),
   314  			fmt.Sprintf("--cluster.leader-session-ttl=%v",
   315  				leaderSessionTTL),
   316  		}
   317  		extraArgs = append(extraArgs, clusterArgs...)
   318  	}
   319  
   320  	return extraArgs
   321  }
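
        // As an illustration of the arguments produced above: for a clustered node
        // named "carol" with a 60 second leader session TTL, the returned slice
        // contains, among others, "--db.backend=etcd",
        // "--cluster.enable-leader-election", "--cluster.id=carol" and
        // "--cluster.leader-session-ttl=60". The helper name below is hypothetical.
        func exampleEtcdArgs(etcdCfg *etcd.Config) []string {
        	return extraArgsEtcd(etcdCfg, "carol", true, 60)
        }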
   322  
   323  // NewNodeWithSeedEtcd starts a new node with a seed that'll use an external
   324  // etcd database as its (remote) channel and wallet DB. The passed cluster
   325  // flag indicates that we'd like the node to join the cluster leader election.
   326  func (n *NetworkHarness) NewNodeWithSeedEtcd(name string, etcdCfg *etcd.Config,
   327  	password []byte, entropy []byte, statelessInit, cluster bool,
   328  	leaderSessionTTL int) (*HarnessNode, []string, []byte, error) {
   329  
   330  	// We don't want to use the embedded etcd instance.
   331  	const dbBackend = BackendBbolt
   332  
   333  	extraArgs := extraArgsEtcd(etcdCfg, name, cluster, leaderSessionTTL)
   334  	return n.newNodeWithSeed(
   335  		name, extraArgs, password, entropy, statelessInit, dbBackend,
   336  	)
   337  }
   338  
   339  // NewNodeEtcd starts a new node with a seed that'll use an external etcd
   340  // database as its (remote) channel and wallet DB. The passed cluster
   341  // flag indicates that we'd like the node to join the cluster leader election.
   342  // If the wait flag is false then we won't wait until RPC is available (this is
   343  // useful when the node is not expected to become the leader right away).
   344  func (n *NetworkHarness) NewNodeEtcd(name string, etcdCfg *etcd.Config,
   345  	password []byte, cluster, wait bool, leaderSessionTTL int) (
   346  	*HarnessNode, error) {
   347  
   348  	// We don't want to use the embedded etcd instance.
   349  	const dbBackend = BackendBbolt
   350  
   351  	extraArgs := extraArgsEtcd(etcdCfg, name, cluster, leaderSessionTTL)
   352  	return n.newNode(name, extraArgs, true, password, dbBackend, wait)
   353  }
   354  
   355  // NewNode fully initializes and returns a new HarnessNode bound to the
   356  // current instance of the network harness. The created node is running, but
   357  // not yet connected to other nodes within the network.
   358  func (n *NetworkHarness) NewNode(t *testing.T,
   359  	name string, extraArgs []string, opts ...NodeOption) *HarnessNode {
   360  
   361  	node, err := n.newNode(
   362  		name, extraArgs, false, nil, n.dbBackend, true, opts...,
   363  	)
   364  	require.NoErrorf(t, err, "unable to create new node for %s", name)
   365  
   366  	return node
   367  }
   368  
   369  // NewNodeWithSeed fully initializes a new HarnessNode after creating a fresh
   370  // aezeed. The provided password is used as both the aezeed password and the
   371  // wallet password. The generated mnemonic is returned along with the
   372  // initialized harness node.
   373  func (n *NetworkHarness) NewNodeWithSeed(name string, extraArgs []string,
   374  	password []byte, statelessInit bool) (*HarnessNode, []string, []byte,
   375  	error) {
   376  
   377  	return n.newNodeWithSeed(
   378  		name, extraArgs, password, nil, statelessInit, n.dbBackend,
   379  	)
   380  }
   381  
   382  func (n *NetworkHarness) newNodeWithSeed(name string, extraArgs []string,
   383  	password, entropy []byte, statelessInit bool, dbBackend DatabaseBackend) (
   384  	*HarnessNode, []string, []byte, error) {
   385  
   386  	node, err := n.newNode(
   387  		name, extraArgs, true, password, dbBackend, true,
   388  	)
   389  	if err != nil {
   390  		return nil, nil, nil, err
   391  	}
   392  
   393  	// Create a request to generate a new aezeed. The new seed will have the
   394  	// same password as the internal wallet.
   395  	genSeedReq := &lnrpc.GenSeedRequest{
   396  		AezeedPassphrase: password,
   397  		SeedEntropy:      entropy,
   398  	}
   399  
   400  	ctxt, cancel := context.WithTimeout(n.runCtx, DefaultTimeout)
   401  	defer cancel()
   402  
   403  	var genSeedResp *lnrpc.GenSeedResponse
   404  	if err := wait.NoError(func() error {
   405  		genSeedResp, err = node.GenSeed(ctxt, genSeedReq)
   406  		return err
   407  	}, DefaultTimeout); err != nil {
   408  		return nil, nil, nil, err
   409  	}
   410  
   411  	// With the seed created, construct the init request to the node,
   412  	// including the newly generated seed.
   413  	initReq := &lnrpc.InitWalletRequest{
   414  		WalletPassword:     password,
   415  		CipherSeedMnemonic: genSeedResp.CipherSeedMnemonic,
   416  		AezeedPassphrase:   password,
   417  		StatelessInit:      statelessInit,
   418  	}
   419  
   420  	// Pass the init request via rpc to finish unlocking the node. This will
   421  	// also initialize the macaroon-authenticated LightningClient.
   422  	response, err := node.Init(initReq)
   423  	if err != nil {
   424  		return nil, nil, nil, fmt.Errorf("unable to init new node: %v", err)
   425  	}
   426  
   427  	// With the node started, we can now record its public key within the
   428  	// global mapping.
   429  	n.RegisterNode(node)
   430  
   431  	// In stateless initialization mode we get a macaroon back that we have
   432  	// to return to the test, otherwise gRPC calls won't be possible since
   433  	// there are no macaroon files created in that mode.
   434  	// In stateful init the admin macaroon will just be nil.
   435  	return node, genSeedResp.CipherSeedMnemonic, response.AdminMacaroon, nil
   436  }
   437  
   438  func (n *NetworkHarness) NewNodeRemoteSigner(name string, extraArgs []string,
   439  	password []byte, watchOnly *lnrpc.WatchOnly) (*HarnessNode, error) {
   440  
   441  	node, err := n.newNode(
   442  		name, extraArgs, true, password, n.dbBackend, true,
   443  	)
   444  	if err != nil {
   445  		return nil, err
   446  	}
   447  
   448  	// With the seed created, construct the init request to the node,
   449  	// including the newly generated seed.
   450  	initReq := &lnrpc.InitWalletRequest{
   451  		WalletPassword: password,
   452  		WatchOnly:      watchOnly,
   453  	}
   454  
   455  	// Pass the init request via rpc to finish unlocking the node. This will
   456  	// also initialize the macaroon-authenticated LightningClient.
   457  	_, err = node.Init(initReq)
   458  	if err != nil {
   459  		return nil, err
   460  	}
   461  
   462  	// With the node started, we can now record its public key within the
   463  	// global mapping.
   464  	n.RegisterNode(node)
   465  
   466  	return node, nil
   467  }
   468  
   469  // RestoreNodeWithSeed fully initializes a HarnessNode using a chosen mnemonic,
   470  // password, recovery window, and optionally a set of static channel backups.
   471  // After providing the initialization request to unlock the node, this method
   472  // will finish initializing the LightningClient such that the HarnessNode can
   473  // be used for regular rpc operations.
   474  func (n *NetworkHarness) RestoreNodeWithSeed(name string, extraArgs []string,
   475  	password []byte, mnemonic []string, rootKey string, recoveryWindow int32,
   476  	chanBackups *lnrpc.ChanBackupSnapshot,
   477  	opts ...NodeOption) (*HarnessNode, error) {
   478  
   479  	node, err := n.newNode(
   480  		name, extraArgs, true, password, n.dbBackend, true, opts...,
   481  	)
   482  	if err != nil {
   483  		return nil, err
   484  	}
   485  
   486  	initReq := &lnrpc.InitWalletRequest{
   487  		WalletPassword:     password,
   488  		CipherSeedMnemonic: mnemonic,
   489  		AezeedPassphrase:   password,
   490  		ExtendedMasterKey:  rootKey,
   491  		RecoveryWindow:     recoveryWindow,
   492  		ChannelBackups:     chanBackups,
   493  	}
   494  
   495  	_, err = node.Init(initReq)
   496  	if err != nil {
   497  		return nil, err
   498  	}
   499  
   500  	// With the node started, we can now record its public key within the
   501  	// global mapping.
   502  	n.RegisterNode(node)
   503  
   504  	return node, nil
   505  }
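
        // The sketch below ties NewNodeWithSeed and RestoreNodeWithSeed together
        // into a typical create/shutdown/restore cycle. It is illustrative only;
        // the helper name, node name and password are hypothetical.
        func exampleSeedRestore(t *testing.T, n *NetworkHarness) {
        	password := []byte("example-password")

        	// Create a node with a fresh aezeed and remember its mnemonic.
        	carol, mnemonic, _, err := n.NewNodeWithSeed("Carol", nil, password, false)
        	require.NoError(t, err)

        	// Shut the node down, e.g. to simulate loss of the node's data.
        	require.NoError(t, n.ShutdownNode(carol))

        	// Recreate the node from the mnemonic, scanning a recovery window of
        	// 1000 addresses and providing no static channel backups.
        	_, err = n.RestoreNodeWithSeed(
        		"Carol", nil, password, mnemonic, "", 1000, nil,
        	)
        	require.NoError(t, err)
        }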
   506  
   507  // newNode initializes a new HarnessNode, supporting the ability to initialize a
   508  // wallet with or without a seed. If hasSeed is false, the returned harness node
   509  // can be used immediately. Otherwise, the node will require an additional
   510  // initialization phase where the wallet is either created or restored.
   511  func (n *NetworkHarness) newNode(name string, extraArgs []string, hasSeed bool,
   512  	password []byte, dbBackend DatabaseBackend, wait bool, opts ...NodeOption) (
   513  	*HarnessNode, error) {
   514  
   515  	cfg := &BaseNodeConfig{
   516  		Name:              name,
   517  		LogFilenamePrefix: n.currentTestCase,
   518  		HasSeed:           hasSeed,
   519  		BackendCfg:        n.BackendCfg,
   520  		Password:          password,
   521  		NetParams:         n.netParams,
   522  		ExtraArgs:         extraArgs,
   523  		RemoteWallet:      useRemoteWallet(),
   524  		DcrwNode:          useDcrwNode(),
   525  		FeeURL:            n.feeService.url,
   526  		DbBackend:         dbBackend,
   527  	}
   528  	for _, opt := range opts {
   529  		opt(cfg)
   530  	}
   531  
   532  	node, err := newNode(cfg)
   533  	if err != nil {
   534  		return nil, err
   535  	}
   536  
   537  	// Put node in activeNodes to ensure Shutdown is called even if Start
   538  	// returns an error.
   539  	n.mtx.Lock()
   540  	n.activeNodes[node.NodeID] = node
   541  	n.mtx.Unlock()
   542  
   543  	err = node.start(n.lndBinary, n.lndErrorChan, wait)
   544  	if err != nil {
   545  		return nil, fmt.Errorf("unable to start new node: %v", err)
   546  	}
   547  
   548  	// If this node is to have a seed, it will need to be unlocked or
   549  	// initialized via rpc. Delay registering it with the network until it
   550  	// can be driven via an unlocked rpc connection.
   551  	if node.Cfg.HasSeed {
   552  		return node, nil
   553  	}
   554  
   555  	// With the node started, we can now record its public key within the
   556  	// global mapping.
   557  	n.RegisterNode(node)
   558  
   559  	return node, nil
   560  }
   561  
   562  // RegisterNode records a new HarnessNode in the NetworkHarnesses map of known
   563  // nodes. This method should only be called with nodes that have successfully
   564  // retrieved their public keys via FetchNodeInfo.
   565  func (n *NetworkHarness) RegisterNode(node *HarnessNode) {
   566  	n.mtx.Lock()
   567  	n.nodesByPub[node.PubKeyStr] = node
   568  	n.mtx.Unlock()
   569  }
   570  
   571  func (n *NetworkHarness) connect(ctx context.Context,
   572  	req *lnrpc.ConnectPeerRequest, a *HarnessNode) error {
   573  
   574  	syncTimeout := time.After(DefaultTimeout)
   575  tryconnect:
   576  	if _, err := a.ConnectPeer(ctx, req); err != nil {
   577  		// If the chain backend is still syncing, retry.
   578  		if strings.Contains(err.Error(), dcrlnd.ErrServerNotActive.Error()) ||
   579  			strings.Contains(err.Error(), "i/o timeout") {
   580  
   581  			select {
   582  			case <-time.After(100 * time.Millisecond):
   583  				goto tryconnect
   584  			case <-syncTimeout:
   585  				return fmt.Errorf("chain backend did not " +
   586  					"finish syncing")
   587  			}
   588  		}
   589  		return err
   590  	}
   591  
   592  	return nil
   593  }
   594  
   595  // EnsureConnected will try to connect to two nodes, returning no error if they
   596  // are already connected. If the nodes were not connected previously, this will
   597  // behave the same as ConnectNodes. If a pending connection request has already
   598  // been made, the method will block until the two nodes appear in each other's
   599  // peers list, or until the timeout expires.
   600  func (n *NetworkHarness) EnsureConnected(t *testing.T, a, b *HarnessNode) {
   601  	ctx, cancel := context.WithTimeout(n.runCtx, DefaultTimeout*2)
   602  	defer cancel()
   603  
   604  	// errConnectionRequested is used to signal that a connection was
   605  	// requested successfully, which is distinct from already being
   606  	// connected to the peer.
   607  	errConnectionRequested := errors.New("connection request in progress")
   608  
   609  	tryConnect := func(a, b *HarnessNode) error {
   610  		bInfo, err := b.GetInfo(ctx, &lnrpc.GetInfoRequest{})
   611  		if err != nil {
   612  			return err
   613  		}
   614  
   615  		req := &lnrpc.ConnectPeerRequest{
   616  			Addr: &lnrpc.LightningAddress{
   617  				Pubkey: bInfo.IdentityPubkey,
   618  				Host:   b.Cfg.P2PAddr(),
   619  			},
   620  		}
   621  
   622  		var predErr error
   623  		err = wait.Predicate(func() bool {
   624  			ctx, cancel := context.WithTimeout(ctx, DefaultTimeout)
   625  			defer cancel()
   626  
   627  			err := n.connect(ctx, req, a)
   628  			switch {
   629  			// Request was successful, wait for both to display the
   630  			// connection.
   631  			case err == nil:
   632  				predErr = errConnectionRequested
   633  				return true
   634  
   635  			// If the two are already connected, we return early
   636  			// with no error.
   637  			case strings.Contains(
   638  				err.Error(), "already connected to peer",
   639  			):
   640  				predErr = nil
   641  				return true
   642  
   643  			default:
   644  				predErr = err
   645  				return false
   646  			}
   647  
   648  		}, DefaultTimeout)
   649  		if err != nil {
   650  			return fmt.Errorf("connection not established within "+
   651  				"%v: %v", DefaultTimeout, predErr)
   652  		}
   653  
   654  		return predErr
   655  	}
   656  
   657  	aErr := tryConnect(a, b)
   658  	bErr := tryConnect(b, a)
   659  	switch {
   660  	// If both reported already being connected to each other, we can exit
   661  	// early.
   662  	case aErr == nil && bErr == nil:
   663  
   664  	// Return any critical error returned by alice.
   665  	case aErr != nil && aErr != errConnectionRequested:
   666  		t.Fatalf(
   667  			"ensure connection between %s and %s failed "+
   668  				"with error from %s: %v",
   669  			a.Cfg.Name, b.Cfg.Name, a.Cfg.Name, aErr,
   670  		)
   671  
   672  	// Return any critical error returned by bob.
   673  	case bErr != nil && bErr != errConnectionRequested:
   674  		t.Fatalf("ensure connection between %s and %s failed "+
   675  			"with error from %s: %v",
   676  			a.Cfg.Name, b.Cfg.Name, b.Cfg.Name, bErr,
   677  		)
   678  
   679  	// Otherwise one or both requested a connection, so we wait for the
   680  	// peers lists to reflect the connection.
   681  	default:
   682  	}
   683  
   684  	findSelfInPeerList := func(a, b *HarnessNode) bool {
   685  		// If node B is seen in the ListPeers response from node A,
   686  		// then we can exit early as the connection has been fully
   687  		// established.
   688  		resp, err := b.ListPeers(ctx, &lnrpc.ListPeersRequest{})
   689  		if err != nil {
   690  			return false
   691  		}
   692  
   693  		for _, peer := range resp.Peers {
   694  			if peer.PubKey == a.PubKeyStr {
   695  				return true
   696  			}
   697  		}
   698  
   699  		return false
   700  	}
   701  
   702  	err := wait.Predicate(func() bool {
   703  		return findSelfInPeerList(a, b) && findSelfInPeerList(b, a)
   704  	}, DefaultTimeout)
   705  
   706  	require.NoErrorf(
   707  		t, err, "unable to connect %s to %s, "+
   708  			"got error: peers not connected within %v",
   709  		a.Cfg.Name, b.Cfg.Name, DefaultTimeout,
   710  	)
   711  }
   712  
   713  // ConnectNodes attempts to create a connection between nodes a and b.
   714  func (n *NetworkHarness) ConnectNodes(t *testing.T, a, b *HarnessNode) {
   715  	n.connectNodes(t, a, b, false)
   716  }
   717  
   718  // ConnectNodesPerm attempts to connect nodes a and b and sets node b as
   719  // a peer that node a should persistently attempt to reconnect to if they
   720  // become disconnected.
   721  func (n *NetworkHarness) ConnectNodesPerm(t *testing.T,
   722  	a, b *HarnessNode) {
   723  
   724  	n.connectNodes(t, a, b, true)
   725  }
   726  
   727  // connectNodes establishes an encrypted+authenticated p2p connection from node
   728  // a towards node b. The function will return a non-nil error if the connection
   729  // was unable to be established. If the perm parameter is set to true then
   730  // node a will persistently attempt to reconnect to node b if they get
   731  // disconnected.
   732  //
   733  // NOTE: This function may block for up to DefaultTimeout as it will not return
   734  // until the new connection is detected as being known to both nodes.
   735  func (n *NetworkHarness) connectNodes(t *testing.T, a, b *HarnessNode,
   736  	perm bool) {
   737  
   738  	ctx, cancel := context.WithTimeout(n.runCtx, DefaultTimeout)
   739  	defer cancel()
   740  
   741  	bobInfo, err := b.GetInfo(ctx, &lnrpc.GetInfoRequest{})
   742  	require.NoErrorf(
   743  		t, err, "unable to connect %s to %s, got error: %v",
   744  		a.Cfg.Name, b.Cfg.Name, err,
   745  	)
   746  
   747  	req := &lnrpc.ConnectPeerRequest{
   748  		Addr: &lnrpc.LightningAddress{
   749  			Pubkey: bobInfo.IdentityPubkey,
   750  			Host:   b.Cfg.P2PAddr(),
   751  		},
   752  		Perm: perm,
   753  	}
   754  
   755  	err = n.connect(ctx, req, a)
   756  	require.NoErrorf(
   757  		t, err, "unable to connect %s to %s, got error: %v",
   758  		a.Cfg.Name, b.Cfg.Name, err,
   759  	)
   760  
   761  	err = wait.Predicate(func() bool {
   762  		// If node B is seen in the ListPeers response from node A,
   763  		// then we can exit early as the connection has been fully
   764  		// established.
   765  		resp, err := a.ListPeers(ctx, &lnrpc.ListPeersRequest{})
   766  		if err != nil {
   767  			return false
   768  		}
   769  
   770  		for _, peer := range resp.Peers {
   771  			if peer.PubKey == b.PubKeyStr {
   772  				return true
   773  			}
   774  		}
   775  
   776  		return false
   777  	}, DefaultTimeout)
   778  
   779  	require.NoErrorf(
   780  		t, err, "unable to connect %s to %s, "+
   781  			"got error: peers not connected within %v",
   782  		a.Cfg.Name, b.Cfg.Name, DefaultTimeout,
   783  	)
   784  }
   785  
   786  // DisconnectNodes disconnects node a from node b by sending an RPC message
   787  // from node a to node b.
   788  func (n *NetworkHarness) DisconnectNodes(a, b *HarnessNode) error {
   789  	ctx, cancel := context.WithTimeout(n.runCtx, DefaultTimeout)
   790  	defer cancel()
   791  
   792  	bobInfo, err := b.GetInfo(ctx, &lnrpc.GetInfoRequest{})
   793  	if err != nil {
   794  		return err
   795  	}
   796  
   797  	req := &lnrpc.DisconnectPeerRequest{
   798  		PubKey: bobInfo.IdentityPubkey,
   799  	}
   800  
   801  	if _, err := a.DisconnectPeer(ctx, req); err != nil {
   802  		return err
   803  	}
   804  
   805  	return nil
   806  }
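
        // A short, illustrative sketch of the connection helpers above. The helper
        // name is hypothetical and two already-running nodes are assumed.
        func exampleConnections(t *testing.T, n *NetworkHarness, a, b *HarnessNode) {
        	// Establish a p2p connection from a to b and wait until both sides
        	// list each other as peers.
        	n.ConnectNodes(t, a, b)

        	// EnsureConnected is safe to call even if the nodes are already
        	// connected, so it can be used defensively before channel operations.
        	n.EnsureConnected(t, a, b)

        	// Tear the connection down again from a's side.
        	require.NoError(t, n.DisconnectNodes(a, b))
        }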
   807  
   808  // RestartNode attempts to restart a lightning node by shutting it down
   809  // cleanly, then restarting the process. This function is fully blocking. Upon
   810  // restart, the RPC connection to the node will be re-attempted, continuing iff
   811  // the connection attempt is successful. If the callback parameter is non-nil,
   812  // then the function will be executed after the node shuts down, but *before*
   813  // the process has been started up again.
   814  //
   815  // This method can be useful when testing edge cases such as a node
   816  // broadcasting an invalidated prior state, persistent state recovery,
   817  // simulating node crashes, etc. Additionally, each time the node is
   818  // restarted, the caller can pass a set of SCBs via the Unlock method,
   819  // allowing them to restore channels during restart.
   820  func (n *NetworkHarness) RestartNode(node *HarnessNode, callback func() error,
   821  	chanBackups ...*lnrpc.ChanBackupSnapshot) error {
   822  
   823  	err := n.RestartNodeNoUnlock(node, callback, true)
   824  	if err != nil {
   825  		return err
   826  	}
   827  
   828  	// If the node doesn't have a password set, then we can exit here as we
   829  	// don't need to unlock it.
   830  	if len(node.Cfg.Password) == 0 {
   831  		return nil
   832  	}
   833  
   834  	// Otherwise, we'll unlock the wallet, then complete the final steps
   835  	// for the node initialization process.
   836  	unlockReq := &lnrpc.UnlockWalletRequest{
   837  		WalletPassword: node.Cfg.Password,
   838  	}
   839  	if len(chanBackups) != 0 {
   840  		unlockReq.ChannelBackups = chanBackups[0]
   841  		unlockReq.RecoveryWindow = 1000
   842  	}
   843  
   844  	if err := node.Unlock(unlockReq); err != nil {
   845  		return err
   846  	}
   847  
   848  	// Give the node some time to catch up with the chain before we
   849  	// continue with the tests.
   850  	return node.WaitForBlockchainSync()
   851  }
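
        // The sketch below restarts a node while restoring a static channel backup,
        // as described above. It is illustrative only; the helper name and the
        // source of the backup snapshot are hypothetical.
        func exampleRestartWithBackup(n *NetworkHarness, node *HarnessNode,
        	backup *lnrpc.ChanBackupSnapshot) error {

        	// The callback runs after the node has shut down but before it is
        	// started again, which is a convenient place to manipulate its
        	// on-disk state.
        	callback := func() error {
        		// e.g. delete or corrupt the channel database here.
        		return nil
        	}

        	return n.RestartNode(node, callback, backup)
        }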
   852  
   853  // RestartNodeNoUnlock attempts to restart a lightning node by shutting it down
   854  // cleanly, then restarting the process. In case the node was set up with a
   855  // seed, it will be left in the locked state. This function is fully blocking. If
   856  // the callback parameter is non-nil, then the function will be executed after
   857  // the node shuts down, but *before* the process has been started up again.
   858  func (n *NetworkHarness) RestartNodeNoUnlock(node *HarnessNode,
   859  	callback func() error, wait bool) error {
   860  
   861  	if err := node.stop(); err != nil {
   862  		return err
   863  	}
   864  
   865  	if callback != nil {
   866  		if err := callback(); err != nil {
   867  			return err
   868  		}
   869  	}
   870  
   871  	return node.start(n.lndBinary, n.lndErrorChan, wait)
   872  }
   873  
   874  // SuspendNode stops the given node and returns a callback that can be used to
   875  // start it again.
   876  func (n *NetworkHarness) SuspendNode(node *HarnessNode) (func() error, error) {
   877  	if err := node.stop(); err != nil {
   878  		return nil, err
   879  	}
   880  
   881  	restart := func() error {
   882  		return node.start(n.lndBinary, n.lndErrorChan, true)
   883  	}
   884  
   885  	return restart, nil
   886  }
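
        // Illustrative use of SuspendNode: take a node offline for part of a test
        // and bring it back with the returned closure. The helper name is
        // hypothetical.
        func exampleSuspendResume(n *NetworkHarness, node *HarnessNode) error {
        	restart, err := n.SuspendNode(node)
        	if err != nil {
        		return err
        	}

        	// ... run the part of the test that needs the node to be offline ...

        	// Bring the node back online.
        	return restart()
        }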
   887  
   888  // ShutdownNode stops an active lnd process and returns when the process has
   889  // exited and any temporary directories have been cleaned up.
   890  func (n *NetworkHarness) ShutdownNode(node *HarnessNode) error {
   891  	if err := node.shutdown(); err != nil {
   892  		return err
   893  	}
   894  
   895  	delete(n.activeNodes, node.NodeID)
   896  	return nil
   897  }
   898  
   899  // KillNode kills the node (but won't wait for the node process to stop).
   900  func (n *NetworkHarness) KillNode(node *HarnessNode) error {
   901  	if err := node.kill(); err != nil {
   902  		return err
   903  	}
   904  
   905  	delete(n.activeNodes, node.NodeID)
   906  	return nil
   907  }
   908  
   909  // StopNode stops the target node, but doesn't yet clean up its directories.
   910  // This can be used to temporarily bring a node down during a test, to be later
   911  // started up again.
   912  func (n *NetworkHarness) StopNode(node *HarnessNode) error {
   913  	return node.stop()
   914  }
   915  
   916  // SaveProfilesPages hits the profile pages of all active nodes and writes them
   917  // to disk using a naming scheme similar to that of the regular set of logs.
   918  func (n *NetworkHarness) SaveProfilesPages(t *testing.T) {
   919  	// Only write goroutine dumps if the flag is active.
   920  	if !(*goroutineDump) {
   921  		return
   922  	}
   923  
   924  	for _, node := range n.activeNodes {
   925  		if err := saveProfilesPage(node); err != nil {
   926  			t.Logf("Logging follow-up error only, see rest of "+
   927  				"the log for actual cause: %v\n", err)
   928  		}
   929  	}
   930  }
   931  
   932  // saveProfilesPage saves the profiles page for the given node to file.
   933  func saveProfilesPage(node *HarnessNode) error {
   934  	resp, err := http.Get(
   935  		fmt.Sprintf(
   936  			"http://localhost:%d/debug/pprof/goroutine?debug=1",
   937  			node.Cfg.ProfilePort,
   938  		),
   939  	)
   940  	if err != nil {
   941  		return fmt.Errorf("failed to get profile page "+
   942  			"(node_id=%d, name=%s): %v",
   943  			node.NodeID, node.Cfg.Name, err)
   944  	}
   945  	defer resp.Body.Close()
   946  
   947  	body, err := ioutil.ReadAll(resp.Body)
   948  	if err != nil {
   949  		return fmt.Errorf("failed to read profile page "+
   950  			"(node_id=%d, name=%s): %v",
   951  			node.NodeID, node.Cfg.Name, err)
   952  	}
   953  
   954  	fileName := fmt.Sprintf(
   955  		"pprof-%d-%s-%s.log", node.NodeID, node.Cfg.Name,
   956  		hex.EncodeToString(node.PubKey[:logPubKeyBytes]),
   957  	)
   958  
   959  	logFile, err := os.Create(fileName)
   960  	if err != nil {
   961  		return fmt.Errorf("failed to create file for profile page "+
   962  			"(node_id=%d, name=%s): %v",
   963  			node.NodeID, node.Cfg.Name, err)
   964  	}
   965  	defer logFile.Close()
   966  
   967  	_, err = logFile.Write(body)
   968  	if err != nil {
   969  		return fmt.Errorf("failed to save profile page "+
   970  			"(node_id=%d, name=%s): %v",
   971  			node.NodeID, node.Cfg.Name, err)
   972  	}
   973  	return nil
   974  }
   975  
   976  // OpenChannelParams houses the params to specify when opening a new channel.
   977  type OpenChannelParams struct {
   978  	// Amt is the local amount being put into the channel.
   979  	Amt dcrutil.Amount
   980  
   981  	// PushAmt is the amount that should be pushed to the remote when the
   982  	// channel is opened.
   983  	PushAmt dcrutil.Amount
   984  
   985  	// Private is a boolean indicating whether the opened channel should be
   986  	// private.
   987  	Private bool
   988  
   989  	// SpendUnconfirmed is a boolean indicating whether we can utilize
   990  	// unconfirmed outputs to fund the channel.
   991  	SpendUnconfirmed bool
   992  
   993  	// MinHtlc is the htlc_minimum_m_atoms value set when opening the
   994  	// channel.
   995  	MinHtlc lnwire.MilliAtom
   996  
   997  	// RemoteMaxHtlcs is the remote_max_htlcs value set when opening the
   998  	// channel, restricting the number of concurrent HTLCs the remote party
   999  	// can add to a commitment.
  1000  	RemoteMaxHtlcs uint16
  1001  
  1002  	// FundingShim is an optional funding shim that the caller can specify
  1003  	// in order to modify the channel funding workflow.
  1004  	FundingShim *lnrpc.FundingShim
  1005  
  1006  	// AtomsPerByte is the amount of atoms to spend in chain fees per byte
  1007  	// of the transaction.
  1008  	AtomsPerByte dcrutil.Amount
  1009  
  1010  	// CommitmentType is the commitment type that should be used for the
  1011  	// channel to be opened.
  1012  	CommitmentType lnrpc.CommitmentType
  1013  }
  1014  
  1015  // OpenChannel attempts to open a channel between srcNode and destNode with the
  1016  // passed channel funding parameters. If the channel pending notification is
  1017  // not received before ChannelOpenTimeout, an error is returned. The
  1018  // SpendUnconfirmed parameter determines whether the channel may be funded
  1019  // with unconfirmed outputs.
  1020  func (n *NetworkHarness) OpenChannel(srcNode, destNode *HarnessNode,
  1021  	p OpenChannelParams) (lnrpc.Lightning_OpenChannelClient, error) {
  1022  
  1023  	// Wait until srcNode and destNode have the latest chain synced.
  1024  	// Otherwise, we may run into a check within the funding manager that
  1025  	// prevents any funding workflows from being kicked off if the chain
  1026  	// isn't yet synced.
  1027  	if err := srcNode.WaitForBlockchainSync(); err != nil {
  1028  		return nil, fmt.Errorf("unable to sync srcNode chain: %v", err)
  1029  	}
  1030  	if err := destNode.WaitForBlockchainSync(); err != nil {
  1031  		return nil, fmt.Errorf("unable to sync destNode chain: %v", err)
  1032  	}
  1033  
  1034  	minConfs := int32(1)
  1035  	if p.SpendUnconfirmed {
  1036  		minConfs = 0
  1037  	}
  1038  
  1039  	openReq := &lnrpc.OpenChannelRequest{
  1040  		NodePubkey:         destNode.PubKey[:],
  1041  		LocalFundingAmount: int64(p.Amt),
  1042  		PushAtoms:          int64(p.PushAmt),
  1043  		Private:            p.Private,
  1044  		MinConfs:           minConfs,
  1045  		SpendUnconfirmed:   p.SpendUnconfirmed,
  1046  		MinHtlcMAtoms:      int64(p.MinHtlc),
  1047  		RemoteMaxHtlcs:     uint32(p.RemoteMaxHtlcs),
  1048  		FundingShim:        p.FundingShim,
  1049  		AtomsPerByte:       int64(p.AtomsPerByte),
  1050  		CommitmentType:     p.CommitmentType,
  1051  	}
  1052  
  1053  	// We need to use n.runCtx here to keep the response stream alive after
  1054  	// the function is returned.
  1055  	respStream, err := srcNode.OpenChannel(n.runCtx, openReq)
  1056  	if err != nil {
  1057  		return nil, fmt.Errorf("unable to open channel between "+
  1058  			"alice and bob: %v", err)
  1059  	}
  1060  
  1061  	chanOpen := make(chan struct{})
  1062  	errChan := make(chan error)
  1063  	go func() {
  1064  		// Consume the "channel pending" update. This waits until the
  1065  		// node notifies us that the final message in the channel
  1066  		// funding workflow has been sent to the remote node.
  1067  		resp, err := respStream.Recv()
  1068  		if err != nil {
  1069  			errChan <- err
  1070  			return
  1071  		}
  1072  		_, ok := resp.Update.(*lnrpc.OpenStatusUpdate_ChanPending)
  1073  		if !ok {
  1074  			errChan <- fmt.Errorf("expected channel pending: "+
  1075  				"update, instead got %v", resp)
  1076  			return
  1077  		}
  1078  
  1079  		close(chanOpen)
  1080  	}()
  1081  
  1082  	select {
  1083  	case <-time.After(ChannelOpenTimeout):
  1084  		return nil, fmt.Errorf("timeout reached before chan pending "+
  1085  			"update sent: %v", err)
  1086  	case err := <-errChan:
  1087  		return nil, err
  1088  	case <-chanOpen:
  1089  		return respStream, nil
  1090  	}
  1091  }
  1092  
  1093  // OpenPendingChannel attempts to open a channel between srcNode and destNode
  1094  // with the passed channel funding parameters. If the channel pending
  1095  // notification is not received before ChannelOpenTimeout, an error is
  1096  // returned.
  1097  func (n *NetworkHarness) OpenPendingChannel(srcNode, destNode *HarnessNode,
  1098  	amt dcrutil.Amount,
  1099  	pushAmt dcrutil.Amount) (*lnrpc.PendingUpdate, error) {
  1100  
  1101  	// Wait until srcNode and destNode have blockchain synced
  1102  	if err := srcNode.WaitForBlockchainSync(); err != nil {
  1103  		return nil, fmt.Errorf("unable to sync srcNode chain: %v", err)
  1104  	}
  1105  	if err := destNode.WaitForBlockchainSync(); err != nil {
  1106  		return nil, fmt.Errorf("unable to sync destNode chain: %v", err)
  1107  	}
  1108  
  1109  	openReq := &lnrpc.OpenChannelRequest{
  1110  		NodePubkey:         destNode.PubKey[:],
  1111  		LocalFundingAmount: int64(amt),
  1112  		PushAtoms:          int64(pushAmt),
  1113  		Private:            false,
  1114  	}
  1115  
  1116  	// We need to use n.runCtx here to keep the response stream alive after
  1117  	// the function is returned.
  1118  	respStream, err := srcNode.OpenChannel(n.runCtx, openReq)
  1119  	if err != nil {
  1120  		return nil, fmt.Errorf("unable to open channel between "+
  1121  			"alice and bob: %v", err)
  1122  	}
  1123  
  1124  	chanPending := make(chan *lnrpc.PendingUpdate)
  1125  	errChan := make(chan error)
  1126  	go func() {
  1127  		// Consume the "channel pending" update. This waits until the
  1128  		// node notifies us that the final message in the channel
  1129  		// funding workflow has been sent to the remote node.
  1130  		resp, err := respStream.Recv()
  1131  		if err != nil {
  1132  			errChan <- err
  1133  			return
  1134  		}
  1135  		pendingResp, ok := resp.Update.(*lnrpc.OpenStatusUpdate_ChanPending)
  1136  		if !ok {
  1137  			errChan <- fmt.Errorf("expected channel pending "+
  1138  				"update, instead got %v", resp)
  1139  			return
  1140  		}
  1141  
  1142  		chanPending <- pendingResp.ChanPending
  1143  	}()
  1144  
  1145  	select {
  1146  	case <-time.After(ChannelOpenTimeout):
  1147  		return nil, fmt.Errorf("timeout reached before chan pending " +
  1148  			"update sent")
  1149  	case err := <-errChan:
  1150  		return nil, err
  1151  	case pendingChan := <-chanPending:
  1152  		return pendingChan, nil
  1153  	}
  1154  }
  1155  
  1156  // WaitForChannelOpen waits for a notification that a channel is open by
  1157  // consuming a message from the passed open channel stream. If
  1158  // ChannelOpenTimeout is reached before the channel has been opened, an error
  1159  // is returned.
  1160  func (n *NetworkHarness) WaitForChannelOpen(
  1161  	openChanStream lnrpc.Lightning_OpenChannelClient) (
  1162  	*lnrpc.ChannelPoint, error) {
  1163  
  1164  	ctx, cancel := context.WithTimeout(n.runCtx, ChannelOpenTimeout)
  1165  	defer cancel()
  1166  
  1167  	errChan := make(chan error)
  1168  	respChan := make(chan *lnrpc.ChannelPoint)
  1169  	go func() {
  1170  		resp, err := openChanStream.Recv()
  1171  		if err != nil {
  1172  			errChan <- fmt.Errorf("unable to read rpc resp: %v", err)
  1173  			return
  1174  		}
  1175  		fundingResp, ok := resp.Update.(*lnrpc.OpenStatusUpdate_ChanOpen)
  1176  		if !ok {
  1177  			errChan <- fmt.Errorf("expected channel open update, "+
  1178  				"instead got %v", resp)
  1179  			return
  1180  		}
  1181  
  1182  		respChan <- fundingResp.ChanOpen.ChannelPoint
  1183  	}()
  1184  
  1185  	select {
  1186  	case <-ctx.Done():
  1187  		return nil, fmt.Errorf("timeout reached while waiting for " +
  1188  			"channel open")
  1189  	case err := <-errChan:
  1190  		return nil, err
  1191  	case chanPoint := <-respChan:
  1192  		return chanPoint, nil
  1193  	}
  1194  }
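
        // The sketch below combines OpenChannel and WaitForChannelOpen into the
        // usual "open and confirm" flow. It is illustrative only: the helper name
        // is hypothetical, the amounts are arbitrary and the number of mined blocks
        // simply exceeds the channel's confirmation requirement.
        func exampleOpenChannel(t *testing.T, n *NetworkHarness,
        	alice, bob *HarnessNode) *lnrpc.ChannelPoint {

        	// Kick off the funding flow and wait for the "channel pending" update.
        	stream, err := n.OpenChannel(alice, bob, OpenChannelParams{
        		Amt:     dcrutil.Amount(1000000),
        		PushAmt: dcrutil.Amount(250000),
        	})
        	require.NoError(t, err)

        	// Mine a few blocks so the funding transaction confirms.
        	_, err = n.Generate(6)
        	require.NoError(t, err)

        	// Block until the "channel open" update arrives on the stream.
        	chanPoint, err := n.WaitForChannelOpen(stream)
        	require.NoError(t, err)

        	return chanPoint
        }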
  1195  
  1196  // CloseChannel attempts to close the channel indicated by the
  1197  // passed channel point, initiated by the passed lnNode. An error is returned
  1198  // if ChannelCloseTimeout is reached before the channel close becomes
  1199  // pending.
  1200  func (n *NetworkHarness) CloseChannel(lnNode *HarnessNode,
  1201  	cp *lnrpc.ChannelPoint, force bool) (lnrpc.Lightning_CloseChannelClient,
  1202  	*chainhash.Hash, error) {
  1203  
  1204  	// This context is only used for the preliminary channel lookups below.
  1205  	// The close channel client returned to the caller is created with
  1206  	// n.runCtx instead, so that it stays alive after this function returns.
  1207  	ctxt, cancel := context.WithTimeout(n.runCtx, ChannelCloseTimeout)
  1208  	defer cancel()
  1209  
  1210  	// Create a channel outpoint that we can use to compare to channels
  1211  	// from the ListChannelsResponse.
  1212  	txidHash, err := getChanPointFundingTxid(cp)
  1213  	if err != nil {
  1214  		return nil, nil, err
  1215  	}
  1216  	fundingTxID, err := chainhash.NewHash(txidHash)
  1217  	if err != nil {
  1218  		return nil, nil, err
  1219  	}
  1220  	chanPoint := wire.OutPoint{
  1221  		Hash:  *fundingTxID,
  1222  		Index: cp.OutputIndex,
  1223  	}
  1224  
  1225  	// We'll wait for *both* nodes to read the channel as active if we're
  1226  	// performing a cooperative channel closure.
  1227  	if !force {
  1228  		timeout := DefaultTimeout
  1229  		listReq := &lnrpc.ListChannelsRequest{}
  1230  
  1231  		// We define two helper functions, one to locate a particular
  1232  		// channel, and the other to check if a channel is active or
  1233  		// not.
  1234  		filterChannel := func(node *HarnessNode,
  1235  			op wire.OutPoint) (*lnrpc.Channel, error) {
  1236  			listResp, err := node.ListChannels(ctxt, listReq)
  1237  			if err != nil {
  1238  				return nil, err
  1239  			}
  1240  
  1241  			for _, c := range listResp.Channels {
  1242  				if c.ChannelPoint == op.String() {
  1243  					return c, nil
  1244  				}
  1245  			}
  1246  
  1247  			return nil, fmt.Errorf("unable to find channel")
  1248  		}
  1249  		activeChanPredicate := func(node *HarnessNode) func() bool {
  1250  			return func() bool {
  1251  				channel, err := filterChannel(node, chanPoint)
  1252  				if err != nil {
  1253  					return false
  1254  				}
  1255  
  1256  				return channel.Active
  1257  			}
  1258  		}
  1259  
  1260  		// Next, we'll fetch the target channel in order to get the
  1261  		// harness node that will be receiving the channel close
  1262  		// request.
  1263  		targetChan, err := filterChannel(lnNode, chanPoint)
  1264  		if err != nil {
  1265  			return nil, nil, err
  1266  		}
  1267  		receivingNode, err := n.LookUpNodeByPub(targetChan.RemotePubkey)
  1268  		if err != nil {
  1269  			return nil, nil, err
  1270  		}
  1271  
  1272  		// Before proceeding, we'll ensure that the channel is active
  1273  		// for both nodes.
  1274  		err = wait.Predicate(activeChanPredicate(lnNode), timeout)
  1275  		if err != nil {
  1276  			return nil, nil, fmt.Errorf("channel of closing " +
  1277  				"node not active in time")
  1278  		}
  1279  		err = wait.Predicate(
  1280  			activeChanPredicate(receivingNode), timeout,
  1281  		)
  1282  		if err != nil {
  1283  			return nil, nil, fmt.Errorf("channel of receiving " +
  1284  				"node not active in time")
  1285  		}
  1286  	}
  1287  
  1288  	var (
  1289  		closeRespStream lnrpc.Lightning_CloseChannelClient
  1290  		closeTxid       *chainhash.Hash
  1291  	)
  1292  
  1293  	err = wait.NoError(func() error {
  1294  		closeReq := &lnrpc.CloseChannelRequest{
  1295  			ChannelPoint: cp, Force: force,
  1296  		}
  1297  		// We need to use n.runCtx to keep the client stream alive
  1298  		// after the function has returned.
  1299  		closeRespStream, err = lnNode.CloseChannel(n.runCtx, closeReq)
  1300  		if err != nil {
  1301  			return fmt.Errorf("unable to close channel: %v", err)
  1302  		}
  1303  
  1304  		// Consume the "channel close" update in order to wait for the
  1305  		// closing transaction to be broadcast, then wait for the
  1306  		// closing tx to be seen within the network.
  1307  		closeResp, err := closeRespStream.Recv()
  1308  		if err != nil {
  1309  			return fmt.Errorf("unable to recv() from close "+
  1310  				"stream: %v", err)
  1311  		}
  1312  		pendingClose, ok := closeResp.Update.(*lnrpc.CloseStatusUpdate_ClosePending)
  1313  		if !ok {
  1314  			return fmt.Errorf("expected channel close update, "+
  1315  				"instead got %v", pendingClose)
  1316  		}
  1317  
  1318  		closeTxid, err = chainhash.NewHash(
  1319  			pendingClose.ClosePending.Txid,
  1320  		)
  1321  		if err != nil {
  1322  			return fmt.Errorf("unable to decode closeTxid: "+
  1323  				"%v", err)
  1324  		}
  1325  		if err := n.Miner.waitForTxInMempool(*closeTxid); err != nil {
  1326  			return fmt.Errorf("error while waiting for "+
  1327  				"broadcast tx: %v", err)
  1328  		}
  1329  		return nil
  1330  	}, ChannelCloseTimeout)
  1331  	if err != nil {
  1332  		return nil, nil, err
  1333  	}
  1334  
  1335  	return closeRespStream, closeTxid, nil
  1336  }
  1337  
  1338  // WaitForChannelClose waits for a notification from the passed channel close
  1339  // stream that the node has deemed the channel fully closed. If
  1340  // ChannelCloseTimeout is reached before the notification is received, an
  1341  // error is returned.
  1342  func (n *NetworkHarness) WaitForChannelClose(
  1343  	closeChanStream lnrpc.Lightning_CloseChannelClient) (
  1344  	*chainhash.Hash, error) {
  1345  
  1346  	errChan := make(chan error)
  1347  	updateChan := make(chan *lnrpc.CloseStatusUpdate_ChanClose)
  1348  	go func() {
  1349  		closeResp, err := closeChanStream.Recv()
  1350  		if err != nil {
  1351  			errChan <- err
  1352  			return
  1353  		}
  1354  
  1355  		closeFin, ok := closeResp.Update.(*lnrpc.CloseStatusUpdate_ChanClose)
  1356  		if !ok {
  1357  			errChan <- fmt.Errorf("expected channel close update, "+
  1358  				"instead got %v", closeFin)
  1359  			return
  1360  		}
  1361  
  1362  		updateChan <- closeFin
  1363  	}()
  1364  
  1365  	// Wait until either the deadline for the context expires, an error
  1366  	// occurs, or the channel close update is received.
  1367  	select {
  1368  	case <-time.After(ChannelCloseTimeout):
  1369  		return nil, fmt.Errorf("timeout reached before update sent")
  1370  	case err := <-errChan:
  1371  		return nil, err
  1372  	case update := <-updateChan:
  1373  		return chainhash.NewHash(update.ChanClose.ClosingTxid)
  1374  	}
  1375  }
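
        // An illustrative close flow combining CloseChannel and
        // WaitForChannelClose. The helper name is hypothetical and the single mined
        // block simply confirms the closing transaction.
        func exampleCloseChannel(t *testing.T, n *NetworkHarness, alice *HarnessNode,
        	chanPoint *lnrpc.ChannelPoint) {

        	// Start a cooperative close and wait for the closing tx to reach the
        	// mempool.
        	stream, _, err := n.CloseChannel(alice, chanPoint, false)
        	require.NoError(t, err)

        	// Confirm the closing transaction.
        	_, err = n.Generate(1)
        	require.NoError(t, err)

        	// Wait for the final "channel closed" update.
        	_, err = n.WaitForChannelClose(stream)
        	require.NoError(t, err)
        }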
  1376  
  1377  // AssertChannelExists asserts that an active channel identified by the
  1378  // specified channel point exists from the point-of-view of the node. It takes
  1379  // an optional set of check functions which can be used to make further
  1380  // assertions using the channel's values. These functions are responsible for
  1381  // failing the test themselves if they do not pass.
  1382  // nolint: interfacer
  1383  func (n *NetworkHarness) AssertChannelExists(node *HarnessNode,
  1384  	chanPoint *wire.OutPoint, checks ...func(*lnrpc.Channel)) error {
  1385  
  1386  	ctx, cancel := context.WithTimeout(n.runCtx, ChannelCloseTimeout)
  1387  	defer cancel()
  1388  
  1389  	req := &lnrpc.ListChannelsRequest{}
  1390  
  1391  	return wait.NoError(func() error {
  1392  		resp, err := node.ListChannels(ctx, req)
  1393  		if err != nil {
  1394  			return fmt.Errorf("unable fetch node's channels: %v", err)
  1395  		}
  1396  
  1397  		for _, channel := range resp.Channels {
  1398  			if channel.ChannelPoint == chanPoint.String() {
  1399  				// First check whether our channel is active,
  1400  				// failing early if it is not.
  1401  				if !channel.Active {
  1402  					return fmt.Errorf("channel %s inactive",
  1403  						chanPoint)
  1404  				}
  1405  
  1406  				// Apply any additional checks that we would
  1407  				// like to verify.
  1408  				for _, check := range checks {
  1409  					check(channel)
  1410  				}
  1411  
  1412  				return nil
  1413  			}
  1414  		}
  1415  
  1416  		return fmt.Errorf("channel %s not found", chanPoint)
  1417  	}, DefaultTimeout)
  1418  }
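
        // Illustrative use of AssertChannelExists with an additional per-channel
        // check function. The helper name is hypothetical.
        func exampleAssertPrivateChannel(t *testing.T, n *NetworkHarness,
        	node *HarnessNode, chanPoint *wire.OutPoint) {

        	err := n.AssertChannelExists(node, chanPoint, func(c *lnrpc.Channel) {
        		// Extra assertion supplied by the caller; it runs once the
        		// channel is found and active.
        		require.True(t, c.Private)
        	})
        	require.NoError(t, err)
        }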
  1419  
  1420  // DumpLogs reads the current logs generated by the passed node, and returns
  1421  // the logs as a single string. This function is useful for examining the logs
  1422  // of a particular node in the case of a test failure.
  1423  // Logs from the lightning node are generated with a delay, so you may need to
  1424  // add a time.Sleep() in order to capture all logs.
  1425  func (n *NetworkHarness) DumpLogs(node *HarnessNode) (string, error) {
  1426  	logFile := fmt.Sprintf("%v/simnet/lnd.log", node.Cfg.LogDir)
  1427  
  1428  	buf, err := ioutil.ReadFile(logFile)
  1429  	if err != nil {
  1430  		return "", err
  1431  	}
  1432  
  1433  	return string(buf), nil
  1434  }
  1435  
  1436  // SendCoins attempts to send amt atoms from the internal mining node to the
  1437  // targeted lightning node using a P2PKH address. 6 blocks are mined afterwards
  1438  // in order to confirm the transaction.
  1439  func (n *NetworkHarness) SendCoins(t *testing.T, amt dcrutil.Amount,
  1440  	target *HarnessNode) {
  1441  
  1442  	err := n.sendCoins(
  1443  		amt, target, lnrpc.AddressType_PUBKEY_HASH,
  1444  		true,
  1445  	)
  1446  	require.NoErrorf(t, err, "unable to send coins for %s", target.Cfg.Name)
  1447  }
  1448  
  1449  // SendCoinsUnconfirmed sends coins from the internal mining node to the target
  1450  // lightning node using a P2PKH address. No blocks are mined afterwards, so the
  1451  // transaction remains unconfirmed.
  1452  func (n *NetworkHarness) SendCoinsUnconfirmed(t *testing.T, amt dcrutil.Amount,
  1453  	target *HarnessNode) {
  1454  
  1455  	err := n.sendCoins(
  1456  		amt, target, lnrpc.AddressType_PUBKEY_HASH,
  1457  		false,
  1458  	)
  1459  	require.NoErrorf(
  1460  		t, err, "unable to send unconfirmed coins for %s",
  1461  		target.Cfg.Name,
  1462  	)
  1463  }
  1464  
  1465  // sendCoins attempts to send amt atoms from the internal mining node to the
  1466  // targeted lightning node. The confirmed boolean indicates whether the
  1467  // transaction that pays to the target should confirm.
  1468  func (n *NetworkHarness) sendCoins(amt dcrutil.Amount, target *HarnessNode,
  1469  	addrType lnrpc.AddressType, confirmed bool) error {
  1470  
  1471  	ctx, cancel := context.WithTimeout(n.runCtx, DefaultTimeout)
  1472  	defer cancel()
  1473  
  1474  	// This method requires that there be no other utxos for this node in
  1475  	// the mempool, therefore mine up to 244 blocks to clear it.
  1476  	maxBlocks := 244
  1477  	for i := 0; i < maxBlocks; i++ {
  1478  		req := &lnrpc.ListUnspentRequest{}
  1479  		resp, err := target.ListUnspent(ctx, req)
  1480  		if err != nil {
  1481  			return err
  1482  		}
  1483  
  1484  		if len(resp.Utxos) == 0 {
  1485  			break
  1486  		}
  1487  		if i == maxBlocks-1 {
  1488  			return fmt.Errorf("node still has %d utxos in the "+
  1489  				"mempool", len(resp.Utxos))
  1490  		}
  1491  		if _, err := n.Generate(1); err != nil {
  1492  			return err
  1493  		}
  1494  	}
  1495  
  1496  	balReq := &lnrpc.WalletBalanceRequest{}
  1497  	initialBalance, err := target.WalletBalance(ctx, balReq)
  1498  	if err != nil {
  1499  		return err
  1500  	}
  1501  
  1502  	// First, obtain an address from the target lightning node, preferring
  1503  	// to receive a p2pkh address so that the output can immediately be
  1504  	// used as an input to a funding transaction.
  1505  	addrReq := &lnrpc.NewAddressRequest{
  1506  		Type: addrType,
  1507  	}
  1508  	resp, err := target.NewAddress(ctx, addrReq)
  1509  	if err != nil {
  1510  		return err
  1511  	}
  1512  	addr, err := stdaddr.DecodeAddress(resp.Address, n.netParams)
  1513  	if err != nil {
  1514  		return err
  1515  	}
  1516  	addrScript, err := input.PayToAddrScript(addr)
  1517  	if err != nil {
  1518  		return err
  1519  	}
  1520  
  1521  	// Sleep to allow the wallet's address manager to lock and prevent
  1522  	// triggering dcrwallet's #1372 deadlock condition.
  1523  	time.Sleep(time.Millisecond * 100)
  1524  	target.LogPrintf("Asking for %s coins at addr %s while having balance %s+%s",
  1525  		amt, addr, dcrutil.Amount(initialBalance.ConfirmedBalance),
  1526  		dcrutil.Amount(initialBalance.UnconfirmedBalance))
  1527  
  1528  	// Generate a transaction which creates an output to the target
  1529  	// pkScript of the desired amount.
  1530  	output := &wire.TxOut{
  1531  		PkScript: addrScript,
  1532  		Value:    int64(amt),
  1533  	}
  1534  	_, err = n.Miner.SendOutputs(ctx, []*wire.TxOut{output}, 7500)
  1535  	if err != nil {
  1536  		return err
  1537  	}
  1538  
  1539  	// Encode the pkScript in hex as this is the format in which it will
  1540  	// be returned via RPC.
  1541  	expPkScriptStr := hex.EncodeToString(addrScript)
  1542  
  1543  	// Now, wait for ListUnspent to show the unconfirmed transaction
  1544  	// containing the correct pkscript.
  1545  	err = wait.NoError(func() error {
  1546  		req := &lnrpc.ListUnspentRequest{}
  1547  		resp, err := target.ListUnspent(ctx, req)
  1548  		if err != nil {
  1549  			return err
  1550  		}
  1551  
  1552  		// When using this method, there should only ever be one
  1553  		// unconfirmed transaction.
  1554  		if len(resp.Utxos) != 1 {
  1555  			return fmt.Errorf("number of unconfirmed utxos "+
  1556  				"should be 1, found %d", len(resp.Utxos))
  1557  		}
  1558  
  1559  		// Assert that the lone unconfirmed utxo contains the same
  1560  		// pkscript as the output generated above.
  1561  		pkScriptStr := resp.Utxos[0].PkScript
  1562  		if pkScriptStr != expPkScriptStr {
  1563  			return fmt.Errorf("pkscript mismatch, want: %s, "+
  1564  				"found: %s", expPkScriptStr, pkScriptStr)
  1565  		}
  1566  
  1567  		return nil
  1568  	}, DefaultTimeout)
  1569  	if err != nil {
  1570  		return fmt.Errorf("unconfirmed utxo was not found in "+
  1571  			"ListUnspent: %v", err)
  1572  	}
  1573  
  1574  	// If the transaction should remain unconfirmed, then we'll wait until
  1575  	// the target node's unconfirmed balance reflects the expected balance
  1576  	// and exit.
  1577  	if !confirmed {
  1578  		expectedBalance := dcrutil.Amount(initialBalance.UnconfirmedBalance) + amt
  1579  		return target.WaitForBalance(expectedBalance, false)
  1580  	}
  1581  
  1582  	// Otherwise, we'll generate 6 new blocks to ensure the output gains a
  1583  	// sufficient number of confirmations and wait for the balance to
  1584  	// reflect what's expected.
  1585  	if _, err := n.Generate(6); err != nil {
  1586  		return err
  1587  	}
  1588  
  1589  	// Wait until the wallet has seen all 6 blocks.
  1590  	_, height, err := n.Miner.Node.GetBestBlock(context.TODO())
  1591  	if err != nil {
  1592  		return err
  1593  	}
  1594  	ctxt, cancelt := context.WithTimeout(context.Background(), DefaultTimeout)
  1595  	defer cancelt()
  1596  	if err := target.WaitForBlockHeight(ctxt, uint32(height)); err != nil {
  1597  		return err
  1598  	}
  1599  
  1600  	// Ensure the balance is as expected.
  1601  	fullInitialBalance := initialBalance.ConfirmedBalance +
  1602  		initialBalance.UnconfirmedBalance
  1603  	expectedBalance := dcrutil.Amount(fullInitialBalance) + amt
  1604  	return target.WaitForBalance(expectedBalance, true)
  1605  }
  1606  
  1607  // Generate generates the given number of blocks, waiting long enough for each
  1608  // new block to propagate to the voting node and for the votes on that block to
  1609  // be generated and published.
  1610  func (n *NetworkHarness) Generate(nb uint32) ([]*chainhash.Hash, error) {
  1611  	ctx, cancel := context.WithTimeout(n.runCtx, time.Second*10*time.Duration(nb))
  1612  	defer cancel()
  1613  	return n.Miner.votingWallet.GenerateBlocks(ctx, nb)
  1614  }
  1615  
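        // A short, hypothetical usage sketch: mine a single block and log its
        // hash.
        //
        //    hashes, err := net.Generate(1)
        //    require.NoError(t, err)
        //    t.Logf("mined block %v", hashes[0])
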
  1616  // SlowGenerate generates blocks with a large time interval between them. This
  1617  // is useful for debugging.
  1618  func (n *NetworkHarness) SlowGenerate(nb uint32) ([]*chainhash.Hash, error) {
  1619  	res := make([]*chainhash.Hash, nb)
  1620  	for i := uint32(0); i < nb; i++ {
  1621  		time.Sleep(time.Second * 3)
  1622  		genRes, err := n.Generate(1)
  1623  		if err != nil {
  1624  			return nil, err
  1625  		}
  1626  		res[i] = genRes[0]
  1627  	}
  1628  	return res, nil
  1629  }
  1630  
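        // SetFeeEstimate sets the fee rate returned by the harness' fee
        // estimation web service.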
  1631  func (n *NetworkHarness) SetFeeEstimate(fee chainfee.AtomPerKByte) {
  1632  	n.feeService.setFee(fee)
  1633  }
  1634  
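        // SetFeeEstimateWithConf sets the fee rate returned by the harness'
        // fee estimation web service for the given confirmation target.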
  1635  func (n *NetworkHarness) SetFeeEstimateWithConf(
  1636  	fee chainfee.AtomPerKByte, conf uint32) {
  1637  
  1638  	n.feeService.setFeeWithConf(fee, conf)
  1639  }
  1640  
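        // A hypothetical usage sketch (the rate and confirmation target are
        // arbitrary): make the fee service report 10,000 atoms/kB for
        // estimates targeting 6 confirmations.
        //
        //    net.SetFeeEstimateWithConf(chainfee.AtomPerKByte(1e4), 6)
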
  1641  // copyAll copies all files and directories from srcDir to dstDir recursively.
  1642  // Note that this function does not support links.
  1643  func copyAll(dstDir, srcDir string) error {
  1644  	entries, err := ioutil.ReadDir(srcDir)
  1645  	if err != nil {
  1646  		return err
  1647  	}
  1648  
  1649  	for _, entry := range entries {
  1650  		srcPath := filepath.Join(srcDir, entry.Name())
  1651  		dstPath := filepath.Join(dstDir, entry.Name())
  1652  
  1653  		info, err := os.Stat(srcPath)
  1654  		if err != nil {
  1655  			return err
  1656  		}
  1657  
  1658  		if info.IsDir() {
  1659  			err := os.Mkdir(dstPath, info.Mode())
  1660  			if err != nil && !os.IsExist(err) {
  1661  				return err
  1662  			}
  1663  
  1664  			err = copyAll(dstPath, srcPath)
  1665  			if err != nil {
  1666  				return err
  1667  			}
  1668  		} else if err := CopyFile(dstPath, srcPath); err != nil {
  1669  			return err
  1670  		}
  1671  	}
  1672  
  1673  	return nil
  1674  }
  1675  
  1676  // BackupDb creates a backup of the current database.
  1677  func (n *NetworkHarness) BackupDb(hn *HarnessNode) error {
  1678  	if hn.backupDbDir != "" {
  1679  		return errors.New("backup already created")
  1680  	}
  1681  
  1682  	restart, err := n.SuspendNode(hn)
  1683  	if err != nil {
  1684  		return err
  1685  	}
  1686  
  1687  	if hn.postgresDbName != "" {
  1688  		// Backup database.
  1689  		backupDbName := hn.postgresDbName + "_backup"
  1690  		err := executePgQuery(
  1691  			"CREATE DATABASE " + backupDbName + " WITH TEMPLATE " +
  1692  				hn.postgresDbName,
  1693  		)
  1694  		if err != nil {
  1695  			return err
  1696  		}
  1697  	} else {
  1698  		// Backup files.
  1699  		tempDir, err := ioutil.TempDir("", "past-state")
  1700  		if err != nil {
  1701  			return fmt.Errorf("unable to create temp db folder: %v",
  1702  				err)
  1703  		}
  1704  
  1705  		if err := copyAll(tempDir, hn.DBDir()); err != nil {
  1706  			return fmt.Errorf("unable to copy database files: %v",
  1707  				err)
  1708  		}
  1709  
  1710  		hn.backupDbDir = tempDir
  1711  	}
  1712  
  1713  	err = restart()
  1714  	if err != nil {
  1715  		return err
  1716  	}
  1717  
  1718  	return nil
  1719  }
  1720  
  1721  // RestoreDb restores a database backup.
  1722  func (n *NetworkHarness) RestoreDb(hn *HarnessNode) error {
  1723  	if hn.postgresDbName != "" {
  1724  		// Restore database.
  1725  		backupDbName := hn.postgresDbName + "_backup"
  1726  		err := executePgQuery(
  1727  			"DROP DATABASE " + hn.postgresDbName,
  1728  		)
  1729  		if err != nil {
  1730  			return err
  1731  		}
  1732  		err = executePgQuery(
  1733  			"ALTER DATABASE " + backupDbName + " RENAME TO " + hn.postgresDbName,
  1734  		)
  1735  		if err != nil {
  1736  			return err
  1737  		}
  1738  	} else {
  1739  		// Restore files.
  1740  		if hn.backupDbDir == "" {
  1741  			return errors.New("no database backup created")
  1742  		}
  1743  
  1744  		if err := copyAll(hn.DBDir(), hn.backupDbDir); err != nil {
  1745  			return fmt.Errorf("unable to copy database files: %v", err)
  1746  		}
  1747  
  1748  		if err := os.RemoveAll(hn.backupDbDir); err != nil {
  1749  			return fmt.Errorf("unable to remove backup dir: %v", err)
  1750  		}
  1751  		hn.backupDbDir = ""
  1752  	}
  1753  
  1754  	return nil
  1755  }
  1756  
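        // A hypothetical sketch of how the backup/restore pair might be used
        // (carol is a placeholder node; note that BackupDb suspends and
        // restarts the node itself, while RestoreDb is typically wrapped in an
        // explicit suspend/restart by the caller):
        //
        //    require.NoError(t, net.BackupDb(carol))
        //
        //    // ... advance carol's channel state ...
        //
        //    restart, err := net.SuspendNode(carol)
        //    require.NoError(t, err)
        //    require.NoError(t, net.RestoreDb(carol))
        //    require.NoError(t, restart())
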
  1757  // getChanPointFundingTxid returns the given channel point's funding txid in
  1758  // raw bytes.
  1759  func getChanPointFundingTxid(chanPoint *lnrpc.ChannelPoint) ([]byte, error) {
  1760  	var txid []byte
  1761  
  1762  	// A channel point's funding txid can be set as either a byte slice or
  1763  	// a string. In the case it is a string, decode it.
  1764  	switch chanPoint.GetFundingTxid().(type) {
  1765  	case *lnrpc.ChannelPoint_FundingTxidBytes:
  1766  		txid = chanPoint.GetFundingTxidBytes()
  1767  	case *lnrpc.ChannelPoint_FundingTxidStr:
  1768  		s := chanPoint.GetFundingTxidStr()
  1769  		h, err := chainhash.NewHashFromStr(s)
  1770  		if err != nil {
  1771  			return nil, err
  1772  		}
  1773  
  1774  		txid = h[:]
  1775  	}
  1776  
  1777  	return txid, nil
  1778  }
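
        // A hypothetical sketch, inside a helper that returns an error, of how
        // the raw txid bytes returned above might be turned into a
        // wire.OutPoint (chanPoint is a placeholder *lnrpc.ChannelPoint):
        //
        //    txidBytes, err := getChanPointFundingTxid(chanPoint)
        //    if err != nil {
        //        return err
        //    }
        //    txid, err := chainhash.NewHash(txidBytes)
        //    if err != nil {
        //        return err
        //    }
        //    outPoint := wire.OutPoint{
        //        Hash:  *txid,
        //        Index: chanPoint.OutputIndex,
        //    }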