github.com/decred/dcrlnd@v0.7.6/lntest/harness_node.go

     1  package lntest
     2  
     3  import (
     4  	"bytes"
     5  	"context"
     6  	"crypto/rand"
     7  	"encoding/hex"
     8  	"encoding/json"
     9  	"fmt"
    10  	"io"
    11  	"io/ioutil"
    12  	"os"
    13  	"os/exec"
    14  	"path"
    15  	"path/filepath"
    16  	"strings"
    17  	"sync"
    18  	"sync/atomic"
    19  	"time"
    20  
    21  	pb "decred.org/dcrwallet/v4/rpc/walletrpc"
    22  	"github.com/decred/dcrd/chaincfg/v3"
    23  	"github.com/decred/dcrd/dcrutil/v4"
    24  	"github.com/decred/dcrd/wire"
    25  	"github.com/decred/dcrlnd/aezeed"
    26  	"github.com/decred/dcrlnd/chanbackup"
    27  	"github.com/decred/dcrlnd/lnrpc"
    28  	"github.com/decred/dcrlnd/lnrpc/invoicesrpc"
    29  	"github.com/decred/dcrlnd/lnrpc/routerrpc"
    30  	"github.com/decred/dcrlnd/lnrpc/signrpc"
    31  	"github.com/decred/dcrlnd/lnrpc/walletrpc"
    32  	"github.com/decred/dcrlnd/lnrpc/watchtowerrpc"
    33  	"github.com/decred/dcrlnd/lnrpc/wtclientrpc"
    34  	"github.com/decred/dcrlnd/lntest/wait"
    35  	"github.com/decred/dcrlnd/macaroons"
    36  	"github.com/jackc/pgx/v4/pgxpool"
    37  	"google.golang.org/grpc"
    38  	"google.golang.org/grpc/backoff"
    39  	"google.golang.org/grpc/codes"
    40  	"google.golang.org/grpc/credentials"
    41  	"google.golang.org/grpc/status"
    42  	"gopkg.in/macaroon.v2"
    43  )
    44  
    45  const (
    46  	// logPubKeyBytes is the number of bytes of the node's PubKey that will
    47  	// be appended to the log file name. The whole PubKey is too long and
    48  	// not really necessary to quickly identify what node produced which
    49  	// log file.
    50  	logPubKeyBytes = 4
    51  
    52  	// trickleDelay is the amount of time in milliseconds between each
    53  	// release of announcements by AuthenticatedGossiper to the network.
    54  	trickleDelay = 50
    55  
    56  	postgresDsn = "postgres://postgres:postgres@localhost:6432/%s?sslmode=disable"
    57  
    58  	// commitInterval specifies the maximum interval the graph database
    59  	// will wait between attempting to flush a batch of modifications to
    60  	// disk(db.batch-commit-interval).
    61  	commitInterval = 10 * time.Millisecond
    62  )
    63  
    64  var (
    65  	// numActiveNodes is the number of active nodes within the test network.
    66  	numActiveNodes uint32 = 0
    67  )
    68  
    69  func postgresDatabaseDsn(dbName string) string {
    70  	return fmt.Sprintf(postgresDsn, dbName)
    71  }
    72  
    73  // BackendConfig is an interface that abstracts away the specific chain backend
    74  // node implementation.
    75  type BackendConfig interface {
    76  	// GenArgs returns the arguments needed to be passed to LND at startup
    77  	// for using this node as a chain backend.
    78  	GenArgs() []string
    79  
    80  	// StartWalletSync starts the sync process of a remote wallet using the
    81  	// given backend implementation.
    82  	StartWalletSync(loader pb.WalletLoaderServiceClient, password []byte) error
    83  
    84  	// ConnectMiner is called to establish a connection to the test miner.
    85  	ConnectMiner() error
    86  
    87  	// DisconnectMiner is called to disconnect the miner.
    88  	DisconnectMiner() error
    89  
    90  	// Name returns the name of the backend type.
    91  	Name() string
    92  }
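
// noopBackend is an editor's sketch, not part of the original file: a minimal
// no-op BackendConfig implementation showing the shape of the interface when
// wiring a new chain backend into the harness. The name is hypothetical and
// the methods intentionally do nothing.
type noopBackend struct{}

func (b noopBackend) GenArgs() []string { return nil }

func (b noopBackend) StartWalletSync(loader pb.WalletLoaderServiceClient,
	password []byte) error {

	return nil
}

func (b noopBackend) ConnectMiner() error { return nil }

func (b noopBackend) DisconnectMiner() error { return nil }

func (b noopBackend) Name() string { return "noop" }

// Compile-time check that noopBackend satisfies BackendConfig.
var _ BackendConfig = noopBackend{}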
    93  
    94  // NodeConfig is the basic interface a node configuration must implement.
    95  type NodeConfig interface {
    96  	// BaseConfig returns the base node configuration struct.
    97  	BaseConfig() *BaseNodeConfig
    98  
    99  	// GenerateListeningPorts generates the ports the node will listen
   100  	// on for the current lightning network test.
   101  	GenerateListeningPorts()
   102  
   103  	// GenArgs generates a slice of command line arguments from the
   104  	// lightning node config struct.
   105  	GenArgs() []string
   106  }
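
// Editor's note (assertion added for illustration, not in the original
// file): *BaseNodeConfig below is the canonical NodeConfig implementation.
var _ NodeConfig = (*BaseNodeConfig)(nil)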
   107  
   108  // BaseNodeConfig is the base node configuration.
   109  type BaseNodeConfig struct {
   110  	Name string
   111  
   112  	// LogFilenamePrefix is used to prefix node log files. Can be used
   113  	// to store the current test case for simpler postmortem debugging.
   114  	LogFilenamePrefix string
   115  
   116  	BackendCfg BackendConfig
   117  	NetParams  *chaincfg.Params
   118  	BaseDir    string
   119  	ExtraArgs  []string
   120  
   121  	DataDir        string
   122  	LogDir         string
   123  	TLSCertPath    string
   124  	TLSKeyPath     string
   125  	AdminMacPath   string
   126  	ReadMacPath    string
   127  	InvoiceMacPath string
   128  
   129  	HasSeed      bool
   130  	Password     []byte
   131  	RemoteWallet bool
   132  	DcrwNode     bool
   133  
   134  	P2PPort     int
   135  	RPCPort     int
   136  	RESTPort    int
   137  	ProfilePort int
   138  	WalletPort  int
   139  
   140  	AcceptKeySend bool
   141  	AcceptAMP     bool
   142  
   143  	FeeURL string
   144  
   145  	DbBackend   DatabaseBackend
   146  	PostgresDsn string
   147  }
   148  
   149  func (cfg BaseNodeConfig) P2PAddr() string {
   150  	return fmt.Sprintf(ListenerFormat, cfg.P2PPort)
   151  }
   152  
   153  func (cfg BaseNodeConfig) RPCAddr() string {
   154  	return fmt.Sprintf(ListenerFormat, cfg.RPCPort)
   155  }
   156  
   157  func (cfg BaseNodeConfig) RESTAddr() string {
   158  	return fmt.Sprintf(ListenerFormat, cfg.RESTPort)
   159  }
   160  
   161  // DBDir returns the holding directory path of the graph database.
   162  func (cfg BaseNodeConfig) DBDir() string {
   163  	return filepath.Join(cfg.DataDir, "graph", cfg.NetParams.Name)
   164  }
   165  
   166  func (cfg BaseNodeConfig) DBPath() string {
   167  	return filepath.Join(cfg.DBDir(), "channel.db")
   168  }
   169  
   170  func (cfg BaseNodeConfig) ChanBackupPath() string {
   171  	return filepath.Join(
   172  		cfg.DataDir, "chain", "decred",
   173  		fmt.Sprintf(
   174  			"%v/%v", cfg.NetParams.Name,
   175  			chanbackup.DefaultBackupFileName,
   176  		),
   177  	)
   178  }
   179  
   180  // GenerateListeningPorts generates the ports the node will listen on for
   181  // the current lightning network test.
   182  func (cfg *BaseNodeConfig) GenerateListeningPorts() {
   183  	if cfg.P2PPort == 0 {
   184  		cfg.P2PPort = NextAvailablePort()
   185  	}
   186  	if cfg.RPCPort == 0 {
   187  		cfg.RPCPort = NextAvailablePort()
   188  	}
   189  	if cfg.RESTPort == 0 {
   190  		cfg.RESTPort = NextAvailablePort()
   191  	}
   192  	if cfg.ProfilePort == 0 {
   193  		cfg.ProfilePort = NextAvailablePort()
   194  	}
   195  	if cfg.WalletPort == 0 {
   196  		cfg.WalletPort = NextAvailablePort()
   197  	}
   198  }
   199  
   200  // BaseConfig returns the base node configuration struct.
   201  func (cfg *BaseNodeConfig) BaseConfig() *BaseNodeConfig {
   202  	return cfg
   203  }
   204  
   205  // GenArgs generates a slice of command line arguments from the lightning node
   206  // config struct.
   207  func (cfg *BaseNodeConfig) GenArgs() []string {
   208  	var args []string
   209  
   210  	switch cfg.NetParams.Net {
   211  	case wire.TestNet3:
   212  		args = append(args, "--testnet")
   213  	case wire.SimNet:
   214  		args = append(args, "--simnet")
   215  	}
   216  
   217  	backendArgs := cfg.BackendCfg.GenArgs()
   218  	args = append(args, backendArgs...)
   219  
   220  	nodeArgs := []string{
   221  		"--nobootstrap",
   222  		"--debuglevel=debug",
   223  		"--defaultchanconfs=1",
   224  		fmt.Sprintf("--db.batch-commit-interval=%v", commitInterval),
   225  		fmt.Sprintf("--defaultremotedelay=%v", DefaultCSV),
   226  		fmt.Sprintf("--rpclisten=%v", cfg.RPCAddr()),
   227  		fmt.Sprintf("--restlisten=%v", cfg.RESTAddr()),
   228  		fmt.Sprintf("--restcors=https://%v", cfg.RESTAddr()),
   229  		fmt.Sprintf("--listen=%v", cfg.P2PAddr()),
   230  		fmt.Sprintf("--externalip=%v", cfg.P2PAddr()),
   231  		fmt.Sprintf("--logdir=%v", cfg.LogDir),
   232  		fmt.Sprintf("--datadir=%v", cfg.DataDir),
   233  		fmt.Sprintf("--tlscertpath=%v", cfg.TLSCertPath),
   234  		fmt.Sprintf("--tlskeypath=%v", cfg.TLSKeyPath),
   235  		fmt.Sprintf("--configfile=%v", cfg.DataDir),
   236  		fmt.Sprintf("--adminmacaroonpath=%v", cfg.AdminMacPath),
   237  		fmt.Sprintf("--readonlymacaroonpath=%v", cfg.ReadMacPath),
   238  		fmt.Sprintf("--invoicemacaroonpath=%v", cfg.InvoiceMacPath),
   239  		fmt.Sprintf("--trickledelay=%v", trickleDelay),
   240  		fmt.Sprintf("--profile=%d", cfg.ProfilePort),
   241  		fmt.Sprintf("--caches.rpc-graph-cache-duration=%d", 0),
   242  	}
   243  	args = append(args, nodeArgs...)
   244  
   245  	if cfg.RemoteWallet {
   246  		args = append(args, fmt.Sprintf("--dcrwallet.grpchost=localhost:%d", cfg.WalletPort))
   247  		args = append(args, fmt.Sprintf("--dcrwallet.certpath=%s", cfg.TLSCertPath))
   248  		args = append(args, fmt.Sprintf("--dcrwallet.clientkeypath=%s", cfg.TLSKeyPath))
   249  		args = append(args, fmt.Sprintf("--dcrwallet.clientcertpath=%s", cfg.TLSCertPath))
   250  	}
   251  
   252  	if cfg.DcrwNode {
   253  		args = append(args, "--node=dcrw")
   254  	}
   255  
   256  	if !cfg.HasSeed {
   257  		args = append(args, "--noseedbackup")
   258  	}
   259  
   260  	if cfg.ExtraArgs != nil {
   261  		args = append(args, cfg.ExtraArgs...)
   262  	}
   263  
   264  	if cfg.AcceptKeySend {
   265  		args = append(args, "--accept-keysend")
   266  	}
   267  
   268  	if cfg.AcceptAMP {
   269  		args = append(args, "--accept-amp")
   270  	}
   271  
   272  	switch cfg.DbBackend {
   273  	case BackendEtcd:
   274  		args = append(args, "--db.backend=etcd")
   275  		args = append(args, "--db.etcd.embedded")
   276  		args = append(
   277  			args, fmt.Sprintf(
   278  				"--db.etcd.embedded_client_port=%v",
   279  				NextAvailablePort(),
   280  			),
   281  		)
   282  		args = append(
   283  			args, fmt.Sprintf(
   284  				"--db.etcd.embedded_peer_port=%v",
   285  				NextAvailablePort(),
   286  			),
   287  		)
   288  		args = append(
   289  			args, fmt.Sprintf(
   290  				"--db.etcd.embedded_log_file=%v",
   291  				path.Join(cfg.LogDir, "etcd.log"),
   292  			),
   293  		)
   294  
   295  	case BackendPostgres:
   296  		args = append(args, "--db.backend=postgres")
   297  		args = append(args, "--db.postgres.dsn="+cfg.PostgresDsn)
   298  	}
   299  
   300  	if cfg.FeeURL != "" {
   301  		args = append(args, "--feeurl="+cfg.FeeURL)
   302  	}
   303  
   304  	return args
   305  }
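
// Example (editor's sketch): rendering the final command line for a simnet
// node, assuming a BackendConfig value such as the hypothetical noopBackend
// above. Paths are left empty here, so the corresponding flags render blank.
//
//	cfg := &BaseNodeConfig{
//		Name:       "alice",
//		BackendCfg: noopBackend{},
//		NetParams:  chaincfg.SimNetParams(),
//	}
//	cfg.GenerateListeningPorts()
//	fmt.Println(strings.Join(cfg.GenArgs(), " "))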
   306  
   307  func (cfg *BaseNodeConfig) genWalletArgs() []string {
   308  	var args []string
   309  
   310  	switch cfg.NetParams.Net {
   311  	case wire.TestNet3:
   312  		args = append(args, "--testnet")
   313  	case wire.SimNet:
   314  		args = append(args, "--simnet")
   315  	}
   316  
   317  	args = append(args, "--nolegacyrpc")
   318  	args = append(args, "--noinitialload")
   319  	args = append(args, "--debuglevel=debug")
   320  	args = append(args, fmt.Sprintf("--grpclisten=127.0.0.1:%d", cfg.WalletPort))
   321  	args = append(args, fmt.Sprintf("--logdir=%s", cfg.LogDir))
   322  	args = append(args, fmt.Sprintf("--appdata=%s", cfg.DataDir))
   323  	args = append(args, fmt.Sprintf("--rpccert=%s", cfg.TLSCertPath))
   324  	args = append(args, fmt.Sprintf("--rpckey=%s", cfg.TLSKeyPath))
   325  	args = append(args, fmt.Sprintf("--clientcafile=%s", cfg.TLSCertPath))
   326  
   327  	// This is not strictly necessary, but it's useful to reduce the
   328  	// startup time of test wallets since it prevents two address discovery
   329  	// processes from happening.
   330  	args = append(args, "--disablecointypeupgrades")
   331  
   332  	return args
   333  }
   334  
   335  // policyUpdateMap defines a type to store channel policy updates. It has the
   336  // format,
   337  //
   338  //	{
   339  //	 "chanPoint1": {
   340  //	      "advertisingNode1": [
   341  //	             policy1, policy2, ...
   342  //	      ],
   343  //	      "advertisingNode2": [
   344  //	             policy1, policy2, ...
   345  //	      ]
   346  //	 },
   347  //	 "chanPoint2": ...
   348  //	}
   349  type policyUpdateMap map[string]map[string][]*lnrpc.RoutingPolicy
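
// Example (editor's sketch): recording one policy update for a hypothetical
// channel point and advertising node, then counting how many were seen.
//
//	updates := policyUpdateMap{}
//	chanPoint, advertiser := "txid:0", "02abc..."
//	if updates[chanPoint] == nil {
//		updates[chanPoint] = make(map[string][]*lnrpc.RoutingPolicy)
//	}
//	updates[chanPoint][advertiser] = append(
//		updates[chanPoint][advertiser], &lnrpc.RoutingPolicy{},
//	)
//	numSeen := len(updates[chanPoint][advertiser]) // 1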
   350  
   351  // HarnessNode represents an instance of lnd running within our test network
   352  // harness. Each HarnessNode instance also fully embeds an RPC client in
   353  // order to programmatically drive the node.
   354  type HarnessNode struct {
   355  	Cfg *BaseNodeConfig
   356  
   357  	// NodeID is a unique identifier for the node within a NetworkHarness.
   358  	NodeID int
   359  
   360  	// PubKey is the serialized compressed identity public key of the node.
   361  	// This field will only be populated once the node itself has been
   362  	// started via the start() method.
   363  	PubKey    [33]byte
   364  	PubKeyStr string
   365  
   366  	walletCmd  *exec.Cmd
   367  	walletConn *grpc.ClientConn
   368  
   369  	// rpc holds a list of RPC clients.
   370  	rpc *RPCClients
   371  
   372  	// chanWatchRequests receives a request for watching a particular event
   373  	// for a given channel.
   374  	chanWatchRequests chan *chanWatchRequest
   375  
   376  	// For each outpoint, we'll track an integer which denotes the number of
   377  	// edges seen for that channel within the network. When this number
   378  	// reaches 2, then it means that both edge advertisements have
   379  	// propagated through the network.
   380  	openChans        map[wire.OutPoint]int
   381  	openChanWatchers map[wire.OutPoint][]chan struct{}
   382  
   383  	closedChans       map[wire.OutPoint]struct{}
   384  	closeChanWatchers map[wire.OutPoint][]chan struct{}
   385  
   386  	// policyUpdates stores a slice of seen policies by each advertising
   387  	// node and the outpoint.
   388  	policyUpdates policyUpdateMap
   389  
   390  	// backupDbDir is the path where a database backup is stored, if any.
   391  	backupDbDir string
   392  
   393  	// postgresDbName is the name of the postgres database where lnd
   394  	// data is stored.
   395  	postgresDbName string
   396  
   397  	// runCtx is a context with cancel method. It's used to signal when the
   398  	// node needs to quit, and used as the parent context when spawning
   399  	// children contexts for RPC requests.
   400  	runCtx context.Context
   401  	cancel context.CancelFunc
   402  
   403  	wg      sync.WaitGroup
   404  	cmd     *exec.Cmd
   405  	logFile *os.File
   406  
   407  	// TODO(yy): remove
   408  	lnrpc.LightningClient
   409  	lnrpc.WalletUnlockerClient
   410  	invoicesrpc.InvoicesClient
   411  	SignerClient     signrpc.SignerClient
   412  	RouterClient     routerrpc.RouterClient
   413  	WalletKitClient  walletrpc.WalletKitClient
   414  	Watchtower       watchtowerrpc.WatchtowerClient
   415  	WatchtowerClient wtclientrpc.WatchtowerClientClient
   416  	StateClient      lnrpc.StateClient
   417  }
   418  
   419  // RPCClients wraps a list of RPC clients into a single struct for easier
   420  // access.
   421  type RPCClients struct {
   422  	// conn is the underlying connection to the grpc endpoint of the node.
   423  	conn *grpc.ClientConn
   424  
   425  	LN               lnrpc.LightningClient
   426  	WalletUnlocker   lnrpc.WalletUnlockerClient
   427  	Invoice          invoicesrpc.InvoicesClient
   428  	Signer           signrpc.SignerClient
   429  	Router           routerrpc.RouterClient
   430  	WalletKit        walletrpc.WalletKitClient
   431  	Watchtower       watchtowerrpc.WatchtowerClient
   432  	WatchtowerClient wtclientrpc.WatchtowerClientClient
   433  	State            lnrpc.StateClient
   434  }
   435  
   436  // Assert *HarnessNode implements the following RPC client interfaces.
   437  var _ lnrpc.LightningClient = (*HarnessNode)(nil)
   438  var _ lnrpc.WalletUnlockerClient = (*HarnessNode)(nil)
   439  var _ invoicesrpc.InvoicesClient = (*HarnessNode)(nil)
   440  
   441  // nextNodeID generates a unique ID for a new node.
   442  func nextNodeID() int {
   443  	return int(atomic.AddUint32(&numActiveNodes, 1))
   444  }
   445  
   446  // newNode creates a new test lightning node instance from the passed config.
   447  func newNode(cfg *BaseNodeConfig) (*HarnessNode, error) {
   448  	if cfg.BaseDir == "" {
   449  		var err error
   450  		cfg.BaseDir, err = ioutil.TempDir("", "lndtest-node")
   451  		if err != nil {
   452  			return nil, err
   453  		}
   454  	}
   455  	cfg.DataDir = filepath.Join(cfg.BaseDir, "data")
   456  	cfg.LogDir = filepath.Join(cfg.BaseDir, "log")
   457  	cfg.TLSCertPath = filepath.Join(cfg.DataDir, "tls.cert")
   458  	cfg.TLSKeyPath = filepath.Join(cfg.DataDir, "tls.key")
   459  
   460  	networkDir := filepath.Join(
   461  		cfg.DataDir, "chain", "decred", cfg.NetParams.Name,
   462  	)
   463  	cfg.AdminMacPath = filepath.Join(networkDir, "admin.macaroon")
   464  	cfg.ReadMacPath = filepath.Join(networkDir, "readonly.macaroon")
   465  	cfg.InvoiceMacPath = filepath.Join(networkDir, "invoice.macaroon")
   466  
   467  	cfg.GenerateListeningPorts()
   468  
   469  	err := os.MkdirAll(cfg.DataDir, os.FileMode(0755))
   470  	if err != nil {
   471  		return nil, err
   472  	}
   473  
   474  	// Run all tests with accept keysend. The keysend code is very isolated
   475  	// and it is highly unlikely that it would affect regular itests when
   476  	// enabled.
   477  	cfg.AcceptKeySend = true
   478  
   479  	// Create temporary database.
   480  	var dbName string
   481  	if cfg.DbBackend == BackendPostgres {
   482  		var err error
   483  		dbName, err = createTempPgDb()
   484  		if err != nil {
   485  			return nil, err
   486  		}
   487  		cfg.PostgresDsn = postgresDatabaseDsn(dbName)
   488  	}
   489  
   490  	return &HarnessNode{
   491  		Cfg:               cfg,
   492  		NodeID:            nextNodeID(),
   493  		chanWatchRequests: make(chan *chanWatchRequest),
   494  		openChans:         make(map[wire.OutPoint]int),
   495  		openChanWatchers:  make(map[wire.OutPoint][]chan struct{}),
   496  
   497  		closedChans:       make(map[wire.OutPoint]struct{}),
   498  		closeChanWatchers: make(map[wire.OutPoint][]chan struct{}),
   499  
   500  		policyUpdates: policyUpdateMap{},
   501  
   502  		postgresDbName: dbName,
   503  	}, nil
   504  }
   505  
   506  func createTempPgDb() (string, error) {
   507  	// Create random database name.
   508  	randBytes := make([]byte, 8)
   509  	_, err := rand.Read(randBytes)
   510  	if err != nil {
   511  		return "", err
   512  	}
   513  	dbName := "itest_" + hex.EncodeToString(randBytes)
   514  
   515  	// Create database.
   516  	err = executePgQuery("CREATE DATABASE " + dbName)
   517  	if err != nil {
   518  		return "", err
   519  	}
   520  
   521  	return dbName, nil
   522  }
   523  
   524  func executePgQuery(query string) error {
   525  	pool, err := pgxpool.Connect(
   526  		context.Background(),
   527  		postgresDatabaseDsn("postgres"),
   528  	)
   529  	if err != nil {
   530  		return fmt.Errorf("unable to connect to database: %v", err)
   531  	}
   532  	defer pool.Close()
   533  
   534  	_, err = pool.Exec(context.Background(), query)
   535  	return err
   536  }
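
// Editor's note: createTempPgDb above has no paired cleanup helper in this
// file. A sketch of one, reusing executePgQuery (the dropTempPgDb name is
// hypothetical):
//
//	func dropTempPgDb(dbName string) error {
//		return executePgQuery("DROP DATABASE IF EXISTS " + dbName)
//	}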
   537  
   538  // String gives the internal state of the node which is useful for debugging.
   539  func (hn *HarnessNode) String() string {
   540  	type nodeCfg struct {
   541  		LogFilenamePrefix string
   542  		ExtraArgs         []string
   543  		HasSeed           bool
   544  		P2PPort           int
   545  		RPCPort           int
   546  		RESTPort          int
   547  		ProfilePort       int
   548  		AcceptKeySend     bool
   549  		AcceptAMP         bool
   550  		FeeURL            string
   551  	}
   552  
   553  	nodeState := struct {
   554  		NodeID      int
   555  		Name        string
   556  		PubKey      string
   557  		OpenChans   map[string]int
   558  		ClosedChans map[string]struct{}
   559  		NodeCfg     nodeCfg
   560  	}{
   561  		NodeID:      hn.NodeID,
   562  		Name:        hn.Cfg.Name,
   563  		PubKey:      hn.PubKeyStr,
   564  		OpenChans:   make(map[string]int),
   565  		ClosedChans: make(map[string]struct{}),
   566  		NodeCfg: nodeCfg{
   567  			LogFilenamePrefix: hn.Cfg.LogFilenamePrefix,
   568  			ExtraArgs:         hn.Cfg.ExtraArgs,
   569  			HasSeed:           hn.Cfg.HasSeed,
   570  			P2PPort:           hn.Cfg.P2PPort,
   571  			RPCPort:           hn.Cfg.RPCPort,
   572  			RESTPort:          hn.Cfg.RESTPort,
   573  			AcceptKeySend:     hn.Cfg.AcceptKeySend,
   574  			AcceptAMP:         hn.Cfg.AcceptAMP,
   575  			FeeURL:            hn.Cfg.FeeURL,
   576  		},
   577  	}
   578  
   579  	for outpoint, count := range hn.openChans {
   580  		nodeState.OpenChans[outpoint.String()] = count
   581  	}
   582  	for outpoint, closed := range hn.closedChans {
   583  		nodeState.ClosedChans[outpoint.String()] = closed
   584  	}
   585  
   586  	stateBytes, err := json.MarshalIndent(nodeState, "", "\t")
   587  	if err != nil {
   588  		return fmt.Sprintf("\n failed to encode node state: %v", err)
   589  	}
   590  
   591  	return fmt.Sprintf("\nnode state: %s", stateBytes)
   592  }
   593  
   594  // DBPath returns the filepath to the channeldb database file for this node.
   595  func (hn *HarnessNode) DBPath() string {
   596  	return hn.Cfg.DBPath()
   597  }
   598  
   599  // DBDir returns the path for the directory holding channeldb file(s).
   600  func (hn *HarnessNode) DBDir() string {
   601  	return hn.Cfg.DBDir()
   602  }
   603  
   604  // Name returns the name of this node set during initialization.
   605  func (hn *HarnessNode) Name() string {
   606  	return hn.Cfg.Name
   607  }
   608  
   609  // TLSCertStr returns the path where the TLS certificate is stored.
   610  func (hn *HarnessNode) TLSCertStr() string {
   611  	return hn.Cfg.TLSCertPath
   612  }
   613  
   614  // TLSKeyStr returns the path where the TLS key is stored.
   615  func (hn *HarnessNode) TLSKeyStr() string {
   616  	return hn.Cfg.TLSKeyPath
   617  }
   618  
   619  // ChanBackupPath returns the filepath to the on-disk channel.backup file for
   620  // this node.
   621  func (hn *HarnessNode) ChanBackupPath() string {
   622  	return hn.Cfg.ChanBackupPath()
   623  }
   624  
   625  // AdminMacPath returns the filepath to the admin.macaroon file for this node.
   626  func (hn *HarnessNode) AdminMacPath() string {
   627  	return hn.Cfg.AdminMacPath
   628  }
   629  
   630  // ReadMacPath returns the filepath to the readonly.macaroon file for this node.
   631  func (hn *HarnessNode) ReadMacPath() string {
   632  	return hn.Cfg.ReadMacPath
   633  }
   634  
   635  // InvoiceMacPath returns the filepath to the invoice.macaroon file for this
   636  // node.
   637  func (hn *HarnessNode) InvoiceMacPath() string {
   638  	return hn.Cfg.InvoiceMacPath
   639  }
   640  
   641  // startLnd handles the startup of lnd, creating log files and possibly
   642  // killing the process when needed.
   643  func (hn *HarnessNode) startLnd(lndBinary string, lndError chan<- error) error {
   644  	args := hn.Cfg.GenArgs()
   645  	hn.cmd = exec.Command(lndBinary, args...)
   646  
   647  	// Redirect stderr output to buffer
   648  	var errb bytes.Buffer
   649  	hn.cmd.Stderr = &errb
   650  
   651  	// If the logoutput flag is passed, redirect output from the nodes to
   652  	// log files.
   653  	var (
   654  		fileName string
   655  		err      error
   656  	)
   657  	if *logOutput {
   658  		fileName, err = addLogFile(hn)
   659  		if err != nil {
   660  			return err
   661  		}
   662  	}
   663  
   664  	if hn.Cfg.RemoteWallet {
   665  		err := hn.startRemoteWallet()
   666  		if err != nil {
   667  			return fmt.Errorf("unable to start remote dcrwallet: %v", err)
   668  		}
   669  	}
   670  
   671  	if err := hn.cmd.Start(); err != nil {
   672  		return fmt.Errorf("unable to start %s's dcrlnd-itest: %v", hn.Name(), err)
   673  	}
   674  
   675  	// Launch a new goroutine that bubbles up any potential fatal
   676  	// process errors to the goroutine running the tests.
   677  	hn.wg.Add(1)
   678  	go func() {
   679  		defer hn.wg.Done()
   680  
   681  		err := hn.cmd.Wait()
   682  		if err != nil {
   683  			lndError <- fmt.Errorf("%v\n%v", err, errb.String())
   684  		}
   685  
   686  		// Make sure log file is closed and renamed if necessary.
   687  		finalizeLogfile(hn, fileName)
   688  
   689  		// Rename the etcd.log file if the node was running on embedded
   690  		// etcd.
   691  		finalizeEtcdLog(hn)
   692  	}()
   693  
   694  	// Do the same for remote wallet.
   695  	if hn.walletCmd != nil {
   696  		hn.wg.Add(1)
   697  		go func() {
   698  			defer hn.wg.Done()
   699  			err := hn.walletCmd.Wait()
   700  			if err != nil {
   701  				lndError <- fmt.Errorf("wallet error during final wait: %v", err)
   702  			}
   703  		}()
   704  	}
   705  
   706  	return nil
   707  }
   708  
   709  // start launches a new process running lnd. Additionally, the PID of the
   710  // launched process is saved in order to possibly kill the process forcibly
   711  // later.
   712  //
   713  // This may not clean up properly if an error is returned, so the caller should
   714  // call shutdown() regardless of the return value.
   715  func (hn *HarnessNode) start(lndBinary string, lndError chan<- error,
   716  	wait bool) error {
   717  
   718  	// Init the runCtx.
   719  	ctxt, cancel := context.WithCancel(context.Background())
   720  	hn.runCtx = ctxt
   721  	hn.cancel = cancel
   722  
   723  	// Start lnd and prepare logs.
   724  	if err := hn.startLnd(lndBinary, lndError); err != nil {
   725  		return err
   726  	}
   727  
   728  	// We may want to skip waiting for the node to come up (e.g. the node
   729  	// is waiting to become the leader).
   730  	if !wait {
   731  		return nil
   732  	}
   733  
   734  	// Since Stop uses the LightningClient to stop the node, if we fail to
   735  	// get a connected client, we have to kill the process.
   736  	useMacaroons := !hn.Cfg.HasSeed && !hn.Cfg.RemoteWallet
   737  	conn, err := hn.ConnectRPC(useMacaroons)
   738  	if err != nil {
   739  		err = fmt.Errorf("ConnectRPC err: %w", err)
   740  		cmdErr := hn.cmd.Process.Kill()
   741  		if cmdErr != nil {
   742  			err = fmt.Errorf("kill process got err: %w: %v",
   743  				cmdErr, err)
   744  		}
   745  		return err
   746  	}
   747  
   748  	// Init all the RPC clients.
   749  	hn.InitRPCClients(conn)
   750  
   751  	if err := hn.WaitUntilStarted(); err != nil {
   752  		return err
   753  	}
   754  
   755  	// If the node was created with a seed, we will need to perform an
   756  	// additional step to unlock the wallet. The connection returned will
   757  	// only use the TLS certs, and can only perform operations necessary to
   758  	// unlock the daemon.
   759  	if hn.Cfg.HasSeed {
   760  		// TODO(yy): remove
   761  		hn.WalletUnlockerClient = lnrpc.NewWalletUnlockerClient(conn)
   762  		return nil
   763  	}
   764  
   765  	if hn.Cfg.RemoteWallet {
   766  		hn.WalletUnlockerClient = lnrpc.NewWalletUnlockerClient(conn)
   767  		err := hn.unlockRemoteWallet()
   768  		if err != nil {
   769  			hn.cmd.Process.Kill()
   770  			hn.walletCmd.Process.Kill()
   771  			return fmt.Errorf("unable to init remote wallet: %v", err)
   772  		}
   773  		return nil
   774  	}
   775  
   776  	return hn.initLightningClient()
   777  }
   778  
   779  // WaitUntilStarted waits until the wallet state flips from "WAITING_TO_START".
   780  func (hn *HarnessNode) WaitUntilStarted() error {
   781  	return hn.waitTillServerState(func(s lnrpc.WalletState) bool {
   782  		return s != lnrpc.WalletState_WAITING_TO_START
   783  	})
   784  }
   785  
   786  // WaitUntilStateReached waits until the given wallet state (or one of the
   787  // states following it) has been reached.
   788  func (hn *HarnessNode) WaitUntilStateReached(
   789  	desiredState lnrpc.WalletState) error {
   790  
   791  	return hn.waitTillServerState(func(s lnrpc.WalletState) bool {
   792  		return s >= desiredState
   793  	})
   794  }
   795  
   796  // WaitUntilServerActive waits until the lnd daemon is fully started.
   797  func (hn *HarnessNode) WaitUntilServerActive() error {
   798  	return hn.waitTillServerState(func(s lnrpc.WalletState) bool {
   799  		return s == lnrpc.WalletState_SERVER_ACTIVE
   800  	})
   801  }
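
// Editor's note: since the WalletState values form an ordered progression,
// WaitUntilStateReached can also target intermediate states. A sketch,
// assuming the standard lnrpc state values:
//
//	// Block until at least the RPC server is up, even if the full
//	// daemon isn't active yet.
//	if err := hn.WaitUntilStateReached(lnrpc.WalletState_RPC_ACTIVE); err != nil {
//		return err
//	}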
   802  
   803  // WaitUntilLeader attempts to finish the start procedure by initiating an RPC
   804  // connection and setting up the wallet unlocker client. This is needed when
   805  // a node that has recently been started was waiting to become the leader and
   806  // we're at the point when we expect that it is the leader now (awaiting
   807  // unlock).
   808  func (hn *HarnessNode) WaitUntilLeader(timeout time.Duration) error {
   809  	var (
   810  		conn    *grpc.ClientConn
   811  		connErr error
   812  	)
   813  
   814  	if err := wait.NoError(func() error {
   815  		conn, connErr = hn.ConnectRPC(!hn.Cfg.HasSeed)
   816  		return connErr
   817  	}, timeout); err != nil {
   818  		return err
   819  	}
   820  
   821  	// Init all the RPC clients.
   822  	hn.InitRPCClients(conn)
   823  
   824  	if err := hn.WaitUntilStarted(); err != nil {
   825  		return err
   826  	}
   827  
   828  	// If the node was created with a seed, we will need to perform an
   829  	// additional step to unlock the wallet. The connection returned will
   830  	// only use the TLS certs, and can only perform operations necessary to
   831  	// unlock the daemon.
   832  	if hn.Cfg.HasSeed {
   833  		// TODO(yy): remove
   834  		hn.WalletUnlockerClient = lnrpc.NewWalletUnlockerClient(conn)
   835  
   836  		return nil
   837  	}
   838  
   839  	return hn.initLightningClient()
   840  }
   841  
   842  // initClientWhenReady waits until the main gRPC server is detected as active,
   843  // then completes the normal HarnessNode gRPC connection creation. If the node
   844  // is initialized stateless, the macaroon is returned so that the client can
   845  // use it.
   846  func (hn *HarnessNode) initClientWhenReady(stateless bool,
   847  	macBytes []byte) error {
   848  
   849  	// Wait for the wallet to finish unlocking, such that we can connect to
   850  	// it via a macaroon-authenticated rpc connection.
   851  	var (
   852  		conn *grpc.ClientConn
   853  		err  error
   854  	)
   855  	if err = wait.NoError(func() error {
   856  		// If the node has been initialized stateless, we need to pass
   857  		// the macaroon to the client.
   858  		if stateless {
   859  			adminMac := &macaroon.Macaroon{}
   860  			err := adminMac.UnmarshalBinary(macBytes)
   861  			if err != nil {
   862  				return fmt.Errorf("unmarshal failed: %w", err)
   863  			}
   864  			conn, err = hn.ConnectRPCWithMacaroon(adminMac)
   865  			return err
   866  		}
   867  
   868  		// Normal initialization, we expect a macaroon to be in the
   869  		// file system.
   870  		conn, err = hn.ConnectRPC(true)
   871  		return err
   872  	}, DefaultTimeout); err != nil {
   873  		return fmt.Errorf("timeout while init client: %w", err)
   874  	}
   875  
   876  	// Init all the RPC clients.
   877  	hn.InitRPCClients(conn)
   878  
   879  	return hn.initLightningClient()
   880  }
   881  
   882  func (hn *HarnessNode) initRemoteWallet(ctx context.Context,
   883  	initReq *lnrpc.InitWalletRequest) (*lnrpc.InitWalletResponse, error) {
   884  
   885  	var mnemonic aezeed.Mnemonic
   886  	copy(mnemonic[:], initReq.CipherSeedMnemonic)
   887  	deciphered, err := mnemonic.Decipher(initReq.AezeedPassphrase)
   888  	if err != nil {
   889  		return nil, err
   890  	}
   891  	// The returned HD seed is the last 16 bytes of the deciphered aezeed
   892  	// byte slice.
   893  	seed := deciphered[len(deciphered)-16:]
   894  
   895  	loader := pb.NewWalletLoaderServiceClient(hn.walletConn)
   896  	reqCreate := &pb.CreateWalletRequest{
   897  		PrivatePassphrase: initReq.WalletPassword,
   898  		Seed:              seed,
   899  	}
   900  	_, err = loader.CreateWallet(ctx, reqCreate)
   901  	if err != nil {
   902  		return nil, err
   903  	}
   904  
   905  	// Set the wallet to use per-account passphrases.
   906  	wallet := pb.NewWalletServiceClient(hn.walletConn)
   907  	reqSetAcctPwd := &pb.SetAccountPassphraseRequest{
   908  		AccountNumber:        0,
   909  		WalletPassphrase:     initReq.WalletPassword,
   910  		NewAccountPassphrase: initReq.WalletPassword,
   911  	}
   912  	_, err = wallet.SetAccountPassphrase(ctx, reqSetAcctPwd)
   913  	if err != nil {
   914  		return nil, err
   915  	}
   916  
   917  	err = hn.Cfg.BackendCfg.StartWalletSync(loader, initReq.WalletPassword)
   918  	if err != nil {
   919  		return nil, err
   920  	}
   921  	unlockReq := &lnrpc.UnlockWalletRequest{
   922  		WalletPassword: initReq.WalletPassword,
   923  		ChannelBackups: initReq.ChannelBackups,
   924  		RecoveryWindow: initReq.RecoveryWindow,
   925  		StatelessInit:  initReq.StatelessInit,
   926  	}
   927  	unlockRes, err := hn.UnlockWallet(ctx, unlockReq)
   928  	if err != nil {
   929  		return nil, fmt.Errorf("unable to unlock wallet: %v", err)
   930  	}
   931  
   932  	// Convert from UnlockWalletResponse to InitWalletResponse so that
   933  	// the caller may verify the macaroon generation when initializing in
   934  	// stateless mode.
   935  	return &lnrpc.InitWalletResponse{AdminMacaroon: unlockRes.AdminMacaroon}, nil
   936  }
   937  
   938  // Init initializes a harness node by passing the init request via rpc. After
   939  // the request is submitted, this method will block until a
   940  // macaroon-authenticated RPC connection can be established to the harness
   941  // node. Once established, the new connection is used to initialize the
   942  // LightningClient and subscribes the HarnessNode to topology changes.
   943  func (hn *HarnessNode) Init(
   944  	initReq *lnrpc.InitWalletRequest) (*lnrpc.InitWalletResponse, error) {
   945  
   946  	ctxt, cancel := context.WithTimeout(hn.runCtx, DefaultTimeout)
   947  	defer cancel()
   948  
   949  	var response *lnrpc.InitWalletResponse
   950  	var err error
   951  	if hn.Cfg.RemoteWallet {
   952  		response, err = hn.initRemoteWallet(ctxt, initReq)
   953  	} else {
   954  		response, err = hn.rpc.WalletUnlocker.InitWallet(ctxt, initReq)
   955  	}
   956  
   957  	if err != nil {
   958  		return nil, fmt.Errorf("failed to init wallet: %w", err)
   959  	}
   960  
   961  	err = hn.initClientWhenReady(
   962  		initReq.StatelessInit, response.AdminMacaroon,
   963  	)
   964  	if err != nil {
   965  		return nil, fmt.Errorf("failed to init: %w", err)
   966  	}
   967  
   968  	return response, nil
   969  }
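
// Example (editor's sketch): the typical seeded-node flow generates a seed
// via the embedded WalletUnlockerClient and feeds the mnemonic back through
// Init. The password value is hypothetical.
//
//	ctx := context.Background()
//	seed, err := hn.GenSeed(ctx, &lnrpc.GenSeedRequest{})
//	if err != nil {
//		return err
//	}
//	_, err = hn.Init(&lnrpc.InitWalletRequest{
//		WalletPassword:     []byte("hunter2*"),
//		CipherSeedMnemonic: seed.CipherSeedMnemonic,
//	})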
   970  
   971  // InitChangePassword initializes a harness node by passing the change password
   972  // request via RPC. After the request is submitted, this method will block until
   973  // a macaroon-authenticated RPC connection can be established to the harness
   974  // node. Once established, the new connection is used to initialize the
   975  // LightningClient and subscribes the HarnessNode to topology changes.
   976  func (hn *HarnessNode) InitChangePassword(
   977  	chngPwReq *lnrpc.ChangePasswordRequest) (*lnrpc.ChangePasswordResponse,
   978  	error) {
   979  
   980  	ctxt, cancel := context.WithTimeout(hn.runCtx, DefaultTimeout)
   981  	defer cancel()
   982  
   983  	response, err := hn.rpc.WalletUnlocker.ChangePassword(ctxt, chngPwReq)
   984  	if err != nil {
   985  		return nil, err
   986  	}
   987  	err = hn.initClientWhenReady(
   988  		chngPwReq.StatelessInit, response.AdminMacaroon,
   989  	)
   990  	if err != nil {
   991  		return nil, err
   992  	}
   993  
   994  	return response, nil
   995  }
   996  
   997  // Unlock attempts to unlock the wallet of the target HarnessNode. This method
   998  // should be called after the restart of a HarnessNode that was created with a
   999  // seed+password. Once this method returns, the HarnessNode will be ready to
  1000  // accept normal gRPC requests and harness commands.
  1001  func (hn *HarnessNode) Unlock(unlockReq *lnrpc.UnlockWalletRequest) error {
  1002  	ctxt, cancel := context.WithTimeout(hn.runCtx, DefaultTimeout)
  1003  	defer cancel()
  1004  
  1005  	// We'll need to unlock the node before it's able to start
  1006  	// up properly.
  1007  	_, err := hn.rpc.WalletUnlocker.UnlockWallet(ctxt, unlockReq)
  1008  	if err != nil {
  1009  		return err
  1010  	}
  1011  
  1012  	// Now that the wallet has been unlocked, we'll wait for the RPC client
  1013  	// to be ready, then establish the normal gRPC connection.
  1014  	return hn.initClientWhenReady(false, nil)
  1015  }
  1016  
  1017  // waitTillServerState subscribes to the server's state changes and blocks
  1018  // until the server is in the targeted state.
  1019  func (hn *HarnessNode) waitTillServerState(
  1020  	predicate func(state lnrpc.WalletState) bool) error {
  1021  
  1022  	ctxt, cancel := context.WithTimeout(hn.runCtx, NodeStartTimeout)
  1023  	defer cancel()
  1024  
  1025  	client, err := hn.rpc.State.SubscribeState(
  1026  		ctxt, &lnrpc.SubscribeStateRequest{},
  1027  	)
  1028  	if err != nil {
  1029  		return fmt.Errorf("failed to subscribe to state: %w", err)
  1030  	}
  1031  
  1032  	errChan := make(chan error, 1)
  1033  	done := make(chan struct{})
  1034  	go func() {
  1035  		for {
  1036  			resp, err := client.Recv()
  1037  			if err != nil {
  1038  				errChan <- err
  1039  				return
  1040  			}
  1041  
  1042  			if predicate(resp.State) {
  1043  				close(done)
  1044  				return
  1045  			}
  1046  		}
  1047  	}()
  1048  
  1049  	var lastErr error
  1050  	for {
  1051  		select {
  1052  		case err := <-errChan:
  1053  			lastErr = err
  1054  
  1055  		case <-done:
  1056  			return nil
  1057  
  1058  		case <-time.After(NodeStartTimeout):
  1059  			return fmt.Errorf("timeout waiting for state, "+
  1060  				"got err from stream: %v", lastErr)
  1061  		}
  1062  	}
  1063  }
  1064  
  1065  // InitRPCClients initializes a list of RPC clients for the node.
  1066  func (hn *HarnessNode) InitRPCClients(c *grpc.ClientConn) {
  1067  	hn.rpc = &RPCClients{
  1068  		conn:             c,
  1069  		LN:               lnrpc.NewLightningClient(c),
  1070  		Invoice:          invoicesrpc.NewInvoicesClient(c),
  1071  		Router:           routerrpc.NewRouterClient(c),
  1072  		WalletKit:        walletrpc.NewWalletKitClient(c),
  1073  		WalletUnlocker:   lnrpc.NewWalletUnlockerClient(c),
  1074  		Watchtower:       watchtowerrpc.NewWatchtowerClient(c),
  1075  		WatchtowerClient: wtclientrpc.NewWatchtowerClientClient(c),
  1076  		Signer:           signrpc.NewSignerClient(c),
  1077  		State:            lnrpc.NewStateClient(c),
  1078  	}
  1079  }
  1080  
  1081  // initLightningClient blocks until the lnd server is fully started and
  1082  // subscribes the harness node to graph topology updates. This method also
  1083  // spawns a lightning network watcher for this node, which watches for topology
  1084  // changes.
  1085  func (hn *HarnessNode) initLightningClient() error {
  1086  	// TODO(yy): remove
  1087  	// Construct the LightningClient that will allow us to use the
  1088  	// HarnessNode directly for normal rpc operations.
  1089  	conn := hn.rpc.conn
  1090  	hn.LightningClient = lnrpc.NewLightningClient(conn)
  1091  	hn.InvoicesClient = invoicesrpc.NewInvoicesClient(conn)
  1092  	hn.RouterClient = routerrpc.NewRouterClient(conn)
  1093  	hn.WalletKitClient = walletrpc.NewWalletKitClient(conn)
  1094  	hn.Watchtower = watchtowerrpc.NewWatchtowerClient(conn)
  1095  	hn.WatchtowerClient = wtclientrpc.NewWatchtowerClientClient(conn)
  1096  	hn.SignerClient = signrpc.NewSignerClient(conn)
  1097  	hn.StateClient = lnrpc.NewStateClient(conn)
  1098  
  1099  	// Wait until the server is fully started.
  1100  	if err := hn.WaitUntilServerActive(); err != nil {
  1101  		return err
  1102  	}
  1103  
  1104  	// Set the harness node's pubkey to what the node claims in GetInfo.
  1105  	// The RPC must have been started at this point.
  1106  	if err := hn.FetchNodeInfo(); err != nil {
  1107  		return err
  1108  	}
  1109  
  1110  	err := hn.WaitForBlockchainSync()
  1111  	if err != nil {
  1112  		return fmt.Errorf("initial blockchain sync of %s failed: %v", hn.Name(),
  1113  			err)
  1114  	}
  1115  
  1116  	// Wait until the node has started all subservices.
  1117  	ctxb := context.Background()
  1118  	wait.Predicate(func() bool {
  1119  		info, err := hn.GetInfo(ctxb, &lnrpc.GetInfoRequest{})
  1120  		if err != nil {
  1121  			return false
  1122  		}
  1123  
  1124  		return info.ServerActive
  1125  	}, 30*time.Second)
  1126  
  1127  	// Launch the watcher that will hook into graph related topology change
  1128  	// from the PoV of this node.
  1129  	hn.wg.Add(1)
  1130  	go hn.lightningNetworkWatcher()
  1131  
  1132  	return nil
  1133  }
  1134  
  1135  // FetchNodeInfo queries an unlocked node to retrieve its public key.
  1136  func (hn *HarnessNode) FetchNodeInfo() error {
  1137  	// Obtain the lnid of this node for quick identification purposes.
  1138  	info, err := hn.rpc.LN.GetInfo(hn.runCtx, &lnrpc.GetInfoRequest{})
  1139  	if err != nil {
  1140  		return err
  1141  	}
  1142  
  1143  	hn.PubKeyStr = info.IdentityPubkey
  1144  
  1145  	pubkey, err := hex.DecodeString(info.IdentityPubkey)
  1146  	if err != nil {
  1147  		return err
  1148  	}
  1149  	copy(hn.PubKey[:], pubkey)
  1150  
  1151  	return nil
  1152  }
  1153  
  1154  // AddToLog adds a formatted line to the node's logfile. This is useful
  1155  // to interleave test output with output from the node.
  1156  func (hn *HarnessNode) AddToLog(format string, a ...interface{}) {
  1157  	// If this node was not set up with a log file, just return early.
  1158  	if hn.logFile == nil {
  1159  		return
  1160  	}
  1161  
  1162  	desc := fmt.Sprintf("itest: %s\n", fmt.Sprintf(format, a...))
  1163  	if _, err := hn.logFile.WriteString(desc); err != nil {
  1164  		hn.PrintErr("write to log err: %v", err)
  1165  	}
  1166  }
  1167  
  1168  // ReadMacaroon waits a given duration for the macaroon file to be created. If
  1169  // the file is readable within the timeout, its content is de-serialized as a
  1170  // macaroon and returned.
  1171  func (hn *HarnessNode) ReadMacaroon(macPath string, timeout time.Duration) (
  1172  	*macaroon.Macaroon, error) {
  1173  
  1174  	// Wait until macaroon file is created and has valid content before
  1175  	// using it.
  1176  	var mac *macaroon.Macaroon
  1177  	err := wait.NoError(func() error {
  1178  		macBytes, err := ioutil.ReadFile(macPath)
  1179  		if err != nil {
  1180  			return fmt.Errorf("error reading macaroon file: %v",
  1181  				err)
  1182  		}
  1183  
  1184  		newMac := &macaroon.Macaroon{}
  1185  		if err = newMac.UnmarshalBinary(macBytes); err != nil {
  1186  			return fmt.Errorf("error unmarshalling macaroon file: %v", err)
  1187  		}
  1188  		mac = newMac
  1189  
  1190  		return nil
  1191  	}, timeout)
  1192  
  1193  	return mac, err
  1194  }
  1195  
  1196  // ConnectRPCWithMacaroon uses the TLS certificate and given macaroon to
  1197  // create a gRPC client connection.
  1198  func (hn *HarnessNode) ConnectRPCWithMacaroon(mac *macaroon.Macaroon) (
  1199  	*grpc.ClientConn, error) {
  1200  
  1201  	// Wait until TLS certificate is created and has valid content before
  1202  	// using it, up to 30 sec.
  1203  	var tlsCreds credentials.TransportCredentials
  1204  	err := wait.NoError(func() error {
  1205  		var err error
  1206  		tlsCreds, err = credentials.NewClientTLSFromFile(
  1207  			hn.Cfg.TLSCertPath, "",
  1208  		)
  1209  		return err
  1210  	}, time.Second*30)
  1211  	if err != nil {
  1212  		return nil, fmt.Errorf("error reading TLS cert: %v", err)
  1213  	}
  1214  
  1215  	opts := []grpc.DialOption{
  1216  		grpc.WithBlock(),
  1217  		grpc.WithTransportCredentials(tlsCreds),
  1218  		grpc.WithConnectParams(grpc.ConnectParams{
  1219  			Backoff: backoff.Config{
  1220  				BaseDelay:  time.Millisecond * 20,
  1221  				Multiplier: 1,
  1222  				Jitter:     0.2,
  1223  				MaxDelay:   time.Millisecond * 20,
  1224  			},
  1225  			MinConnectTimeout: time.Millisecond * 20,
  1226  		}),
  1227  	}
  1228  
  1229  	ctx, cancel := context.WithTimeout(hn.runCtx, DefaultTimeout)
  1230  	defer cancel()
  1231  
  1232  	if mac == nil {
  1233  		return grpc.DialContext(ctx, hn.Cfg.RPCAddr(), opts...)
  1234  	}
  1235  	macCred, err := macaroons.NewMacaroonCredential(mac)
  1236  	if err != nil {
  1237  		return nil, fmt.Errorf("error cloning mac: %v", err)
  1238  	}
  1239  	opts = append(opts, grpc.WithPerRPCCredentials(macCred))
  1240  
  1241  	return grpc.DialContext(ctx, hn.Cfg.RPCAddr(), opts...)
  1242  }
  1243  
  1244  // ConnectRPC uses the TLS certificate and admin macaroon files written by the
  1245  // lnd node to create a gRPC client connection.
  1246  func (hn *HarnessNode) ConnectRPC(useMacs bool) (*grpc.ClientConn, error) {
  1247  	// If we don't want to use macaroons, just pass nil, the next method
  1248  	// will handle it correctly.
  1249  	if !useMacs {
  1250  		return hn.ConnectRPCWithMacaroon(nil)
  1251  	}
  1252  
  1253  	// If we should use a macaroon, always take the admin macaroon as a
  1254  	// default.
  1255  	mac, err := hn.ReadMacaroon(hn.Cfg.AdminMacPath, DefaultTimeout)
  1256  	if err != nil {
  1257  		return nil, err
  1258  	}
  1259  	return hn.ConnectRPCWithMacaroon(mac)
  1260  }
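
// Example (editor's sketch): dialing with a non-admin macaroon instead, by
// combining ReadMacaroon and ConnectRPCWithMacaroon directly:
//
//	mac, err := hn.ReadMacaroon(hn.ReadMacPath(), DefaultTimeout)
//	if err != nil {
//		return err
//	}
//	conn, err := hn.ConnectRPCWithMacaroon(mac)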
  1261  
  1262  // SetExtraArgs assigns the ExtraArgs field for the node's configuration. The
  1263  // changes will take effect on restart.
  1264  func (hn *HarnessNode) SetExtraArgs(extraArgs []string) {
  1265  	hn.Cfg.ExtraArgs = extraArgs
  1266  }
  1267  
  1268  // cleanup cleans up all the temporary files created by the node's process.
  1269  func (hn *HarnessNode) cleanup() error {
  1270  	if hn.backupDbDir != "" {
  1271  		err := os.RemoveAll(hn.backupDbDir)
  1272  		if err != nil {
  1273  			return fmt.Errorf("unable to remove backup dir: %v",
  1274  				err)
  1275  		}
  1276  	}
  1277  
  1278  	return os.RemoveAll(hn.Cfg.BaseDir)
  1279  }
  1280  
  1281  // stop attempts to stop the active lnd process.
  1282  func (hn *HarnessNode) stop() error {
  1283  	// Do nothing if the process is not running.
  1284  	if hn.runCtx == nil {
  1285  		return nil
  1286  	}
  1287  
  1288  	// If start() failed before creating clients, we will just wait for the
  1289  	// child process to die.
  1290  	if hn.rpc != nil && hn.rpc.LN != nil {
  1291  		// Don't watch for error because sometimes the RPC connection
  1292  		// gets closed before a response is returned.
  1293  		req := lnrpc.StopRequest{}
  1294  		err := wait.NoError(func() error {
  1295  			_, err := hn.rpc.LN.StopDaemon(hn.runCtx, &req)
  1296  			switch {
  1297  			case err == nil:
  1298  				return nil
  1299  
  1300  			// Try again if a recovery/rescan is in progress.
  1301  			case strings.Contains(
  1302  				err.Error(), "recovery in progress",
  1303  			):
  1304  				return err
  1305  
  1306  			default:
  1307  				return nil
  1308  			}
  1309  		}, DefaultTimeout)
  1310  		if err != nil {
  1311  			return err
  1312  		}
  1313  	}
  1314  
  1315  	// Stop the remote dcrwallet instance if running a remote wallet.
  1316  	if hn.walletCmd != nil {
  1317  		hn.walletCmd.Process.Signal(os.Interrupt)
  1318  	}
  1319  
  1320  	// Stop the runCtx and wait for goroutines to finish.
  1321  	hn.cancel()
  1322  
  1323  	// Wait for lnd process to exit.
  1324  	err := wait.NoError(func() error {
  1325  		if hn.cmd.ProcessState == nil {
  1326  			return fmt.Errorf("process did not exit")
  1327  		}
  1328  
  1329  		if !hn.cmd.ProcessState.Exited() {
  1330  			return fmt.Errorf("process did not exit")
  1331  		}
  1332  
  1333  		// Wait for goroutines to be finished.
  1334  		hn.wg.Wait()
  1335  
  1336  		return nil
  1337  	}, DefaultTimeout*2)
  1338  	if err != nil {
  1339  		return err
  1340  	}
  1341  
  1342  	hn.LightningClient = nil
  1343  	hn.WalletUnlockerClient = nil
  1344  	hn.Watchtower = nil
  1345  	hn.WatchtowerClient = nil
  1346  
  1347  	// Close any attempts at further grpc connections.
  1348  	if hn.rpc.conn != nil {
  1349  		code := status.Code(hn.rpc.conn.Close())
  1350  		switch code {
  1351  		case codes.OK:
  1352  			return nil
  1353  
  1354  		// When the context is canceled above, we might get the
  1355  		// following error as the context is no longer active.
  1356  		case codes.Canceled:
  1357  			return nil
  1358  
  1359  		case codes.Unknown:
  1360  			return fmt.Errorf("unknown error attempting to stop "+
  1361  				"grpc client: %v", code)
  1362  
  1363  		default:
  1364  			return fmt.Errorf("error attempting to stop "+
  1365  				"grpc client: %v", code)
  1366  		}
  1367  
  1368  	}
  1369  
  1370  	return nil
  1371  }
  1372  
  1373  // shutdown stops the active lnd process and cleans up any temporary
  1374  // directories created along the way.
  1375  func (hn *HarnessNode) shutdown() error {
  1376  	if err := hn.stop(); err != nil {
  1377  		return err
  1378  	}
  1379  	if err := hn.cleanup(); err != nil {
  1380  		return err
  1381  	}
  1382  	return nil
  1383  }
  1384  
  1385  // kill kills the lnd process.
  1386  func (hn *HarnessNode) kill() error {
  1387  	return hn.cmd.Process.Kill()
  1388  }
  1389  
  1390  // P2PAddr returns the configured P2P address for the node.
  1391  func (hn *HarnessNode) P2PAddr() string {
  1392  	return hn.Cfg.P2PAddr()
  1393  }
  1394  
  1395  type chanWatchType uint8
  1396  
  1397  const (
  1398  	// watchOpenChannel specifies that this is a request to watch an open
  1399  	// channel event.
  1400  	watchOpenChannel chanWatchType = iota
  1401  
  1402  	// watchCloseChannel specifies that this is a request to watch a close
  1403  	// channel event.
  1404  	watchCloseChannel
  1405  
  1406  	// watchPolicyUpdate specifies that this is a request to watch a policy
  1407  	// update event.
  1408  	watchPolicyUpdate
  1409  )
  1410  
  1411  // chanWatchRequest is a request to the lightningNetworkWatcher to be
  1412  // notified once it's detected within the test Lightning Network that a
  1413  // channel has either been opened or closed.
  1414  type chanWatchRequest struct {
  1415  	chanPoint wire.OutPoint
  1416  
  1417  	chanWatchType chanWatchType
  1418  
  1419  	eventChan chan struct{}
  1420  
  1421  	advertisingNode    string
  1422  	policy             *lnrpc.RoutingPolicy
  1423  	includeUnannounced bool
  1424  }
  1425  
  1426  func (hn *HarnessNode) checkChanPointInGraph(chanPoint wire.OutPoint) bool {
  1428  	ctxt, cancel := context.WithTimeout(hn.runCtx, DefaultTimeout)
  1429  	defer cancel()
  1430  
  1431  	chanGraph, err := hn.DescribeGraph(ctxt, &lnrpc.ChannelGraphRequest{})
  1432  	if err != nil {
  1433  		return false
  1434  	}
  1435  
  1436  	targetChanPoint := chanPoint.String()
  1437  	for _, chanEdge := range chanGraph.Edges {
  1438  		candidateChanPoint := chanEdge.ChanPoint
  1439  		if targetChanPoint == candidateChanPoint {
  1440  			return true
  1441  		}
  1442  	}
  1443  
  1444  	return false
  1445  }
  1446  
  1447  // lightningNetworkWatcher is a goroutine which is able to dispatch
  1448  // notifications once it has been observed that a target channel has been
  1449  // closed or opened within the network. In order to dispatch these
  1450  // notifications, the GraphTopologySubscription client exposed as part of the
  1451  // gRPC interface is used.
  1452  func (hn *HarnessNode) lightningNetworkWatcher() {
  1453  	defer hn.wg.Done()
  1454  
  1455  	graphUpdates := make(chan *lnrpc.GraphTopologyUpdate)
  1456  
  1457  	// Start a goroutine to receive graph updates.
  1458  	hn.wg.Add(1)
  1459  	go func() {
  1460  		defer hn.wg.Done()
  1461  		err := hn.receiveTopologyClientStream(graphUpdates)
  1462  
  1463  		if err != nil {
  1464  			hn.PrintErr("receive topology client stream "+
  1465  				"got err:%v", err)
  1466  		}
  1467  	}()
  1468  
  1469  	for {
  1470  		select {
  1471  
  1472  		// A new graph update has just been received, so we'll examine
  1473  		// the current set of registered clients to see if we can
  1474  		// dispatch any requests.
  1475  		case graphUpdate := <-graphUpdates:
  1476  			hn.handleChannelEdgeUpdates(graphUpdate.ChannelUpdates)
  1477  			hn.handleClosedChannelUpdate(graphUpdate.ClosedChans)
  1478  			// TODO(yy): handle node updates too
  1479  
  1480  		// A new watch request has just arrived. We'll either be able
  1481  		// to dispatch immediately, or need to add the client for
  1482  		// processing later.
  1483  		case watchRequest := <-hn.chanWatchRequests:
  1484  			switch watchRequest.chanWatchType {
  1485  			case watchOpenChannel:
  1486  				// TODO(roasbeef): add update type also, checks
  1487  				// for multiple of 2
  1488  				hn.handleOpenChannelWatchRequest(watchRequest)
  1489  
  1490  			case watchCloseChannel:
  1491  				hn.handleCloseChannelWatchRequest(watchRequest)
  1492  
  1493  			case watchPolicyUpdate:
  1494  				hn.handlePolicyUpdateWatchRequest(watchRequest)
  1495  			}
  1496  
  1497  		case <-hn.runCtx.Done():
  1498  			return
  1499  		}
  1500  	}
  1501  }
  1502  
  1503  // WaitForNetworkChannelOpen will block until a channel with the target
  1504  // outpoint is seen as being fully advertised within the network. A channel is
  1505  // considered "fully advertised" once both of its directional edges have been
  1506  // advertised within the test Lightning Network.
  1507  func (hn *HarnessNode) WaitForNetworkChannelOpen(
  1508  	chanPoint *lnrpc.ChannelPoint) error {
  1509  
  1510  	ctxt, cancel := context.WithTimeout(hn.runCtx, DefaultTimeout)
  1511  	defer cancel()
  1512  
  1513  	eventChan := make(chan struct{})
  1514  
  1515  	op, err := MakeOutpoint(chanPoint)
  1516  	if err != nil {
  1517  		return fmt.Errorf("failed to create outpoint for %v "+
  1518  			"got err: %v", chanPoint, err)
  1519  	}
  1520  
  1521  	hn.LogPrintf("Going to wait for open of %s", op)
  1522  	hn.chanWatchRequests <- &chanWatchRequest{
  1523  		chanPoint:     op,
  1524  		eventChan:     eventChan,
  1525  		chanWatchType: watchOpenChannel,
  1526  	}
  1527  
  1528  	select {
  1529  	case <-eventChan:
  1530  		return nil
  1531  	case <-ctxt.Done():
  1532  		return fmt.Errorf("channel:%s not opened before timeout: %s",
  1533  			op, hn)
  1534  	}
  1535  }
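
// Example (editor's sketch, assuming chanPoint was returned by a channel-open
// helper elsewhere in the harness):
//
//	if err := node.WaitForNetworkChannelOpen(chanPoint); err != nil {
//		return fmt.Errorf("channel not seen by %s: %v",
//			node.Name(), err)
//	}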
  1536  
  1537  // WaitForNetworkChannelClose will block until a channel with the target
  1538  // outpoint is seen as closed within the network. A channel is considered
  1539  // closed once a transaction spending the funding outpoint is seen within a
  1540  // confirmed block.
  1541  func (hn *HarnessNode) WaitForNetworkChannelClose(
  1542  	chanPoint *lnrpc.ChannelPoint) error {
  1543  
  1544  	ctxt, cancel := context.WithTimeout(hn.runCtx, DefaultTimeout)
  1545  	defer cancel()
  1546  
  1547  	eventChan := make(chan struct{})
  1548  
  1549  	op, err := MakeOutpoint(chanPoint)
  1550  	if err != nil {
  1551  		return fmt.Errorf("failed to create outpoint for %v "+
  1552  			"got err: %v", chanPoint, err)
  1553  	}
  1554  
  1555  	hn.chanWatchRequests <- &chanWatchRequest{
  1556  		chanPoint:     op,
  1557  		eventChan:     eventChan,
  1558  		chanWatchType: watchCloseChannel,
  1559  	}
  1560  
  1561  	select {
  1562  	case <-eventChan:
  1563  		return nil
  1564  	case <-ctxt.Done():
  1565  		return fmt.Errorf("channel:%s not closed before timeout: "+
  1566  			"%s", op, hn)
  1567  	}
  1568  }
  1569  
  1570  // WaitForChannelPolicyUpdate will block until a channel policy with the target
  1571  // outpoint and advertisingNode is seen within the network.
  1572  func (hn *HarnessNode) WaitForChannelPolicyUpdate(
  1573  	advertisingNode string, policy *lnrpc.RoutingPolicy,
  1574  	chanPoint *lnrpc.ChannelPoint, includeUnannounced bool) error {
  1575  
  1576  	ctxt, cancel := context.WithTimeout(hn.runCtx, DefaultTimeout)
  1577  	defer cancel()
  1578  
  1579  	eventChan := make(chan struct{})
  1580  
  1581  	op, err := MakeOutpoint(chanPoint)
  1582  	if err != nil {
  1583  		return fmt.Errorf("failed to create outpoint for %v, "+
  1584  			"got err: %v", chanPoint, err)
  1585  	}
  1586  
  1587  	ticker := time.NewTicker(wait.PollInterval)
  1588  	defer ticker.Stop()
  1589  
  1590  	for {
  1591  		select {
  1592  		// Send a watch request on every tick of the poll interval.
  1593  		case <-ticker.C:
  1594  			// Did the event chan close in the meantime? We want to
  1595  			// avoid a "close of closed channel" panic since we're
  1596  			// re-using the same event chan for multiple requests.
  1597  			select {
  1598  			case <-eventChan:
  1599  				return nil
  1600  			default:
  1601  			}
  1602  
  1603  			hn.chanWatchRequests <- &chanWatchRequest{
  1604  				chanPoint:          op,
  1605  				eventChan:          eventChan,
  1606  				chanWatchType:      watchPolicyUpdate,
  1607  				policy:             policy,
  1608  				advertisingNode:    advertisingNode,
  1609  				includeUnannounced: includeUnannounced,
  1610  			}
  1611  
  1612  		case <-eventChan:
  1613  			return nil
  1614  
  1615  		case <-ctxt.Done():
  1616  			return fmt.Errorf("channel:%s policy not updated "+
  1617  				"before timeout: [%s:%v] %s", op,
  1618  				advertisingNode, policy, hn.String())
  1619  		}
  1620  	}
  1621  }
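
        // Added commentary (not part of the original file): a hypothetical
        // sketch of waiting for a policy change to propagate to a peer.
        // Candidate policies are compared against the expected one via
        // CheckChannelPolicy; the values and node names below are
        // placeholders.
        //
        //	expected := &lnrpc.RoutingPolicy{TimeLockDelta: 80}
        //	err := bob.WaitForChannelPolicyUpdate(
        //		alice.PubKeyStr, expected, chanPoint, false,
        //	)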
  1622  
  1623  // WaitForBlockchainSync waits for the target node to be fully synchronized
  1624  // with the blockchain. It continually polls the node until the default
  1625  // timeout has elapsed. If the chain isn't synced before the timeout is up,
  1626  // this function will return an error.
  1627  func (hn *HarnessNode) WaitForBlockchainSync() error {
  1628  	ctxt, cancel := context.WithTimeout(hn.runCtx, DefaultTimeout)
  1629  	defer cancel()
  1630  
  1631  	ticker := time.NewTicker(time.Millisecond * 100)
  1632  	defer ticker.Stop()
  1633  
  1634  	for {
  1635  		resp, err := hn.rpc.LN.GetInfo(ctxt, &lnrpc.GetInfoRequest{})
  1636  		if err != nil {
  1637  			return err
  1638  		}
  1639  		if resp.SyncedToChain {
  1640  			return nil
  1641  		}
  1642  
  1643  		select {
  1644  		case <-ctxt.Done():
  1645  			return fmt.Errorf("timeout while waiting for " +
  1646  				"blockchain sync")
  1647  		case <-hn.runCtx.Done():
  1648  			return nil
  1649  		case <-ticker.C:
  1650  		}
  1651  	}
  1652  }
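
        // Added commentary (not part of the original file): a hypothetical
        // sketch. After mining blocks, tests typically resynchronize the
        // node before making any assertions; alice and t are placeholders.
        //
        //	if err := alice.WaitForBlockchainSync(); err != nil {
        //		t.Fatalf("node never caught up to the chain: %v", err)
        //	}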
  1653  
  1654  // WaitForBlockHeight will block until the target node syncs to the given
  1655  // block height or the context expires.
  1656  func (hn *HarnessNode) WaitForBlockHeight(ctx context.Context, height uint32) error {
  1657  	errChan := make(chan error, 1)
  1658  	retryDelay := time.Millisecond * 100
  1659  
  1660  	go func() {
  1661  		for {
  1662  			select {
  1663  			case <-ctx.Done():
  1664  				return
  1665  			case <-hn.runCtx.Done():
  1666  				return
  1667  			default:
  1668  			}
  1668  
  1669  			getInfoReq := &lnrpc.GetInfoRequest{}
  1670  			getInfoResp, err := hn.GetInfo(ctx, getInfoReq)
  1671  			if err != nil {
  1672  				errChan <- err
  1673  				return
  1674  			}
  1675  			if getInfoResp.SyncedToChain && getInfoResp.BlockHeight == height {
  1676  				errChan <- nil
  1677  				return
  1678  			}
  1679  
  1680  			select {
  1681  			case <-ctx.Done():
  1682  				return
  1683  			case <-time.After(retryDelay):
  1684  			}
  1685  		}
  1686  	}()
  1687  
  1688  	select {
  1689  	case <-hn.runCtx.Done():
  1690  		return nil
  1691  	case err := <-errChan:
  1692  		return err
  1693  	case <-ctx.Done():
  1694  		return fmt.Errorf("timeout while waiting for block height %d", height)
  1695  	}
  1696  }
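
        // Added commentary (not part of the original file): a hypothetical
        // sketch. Unlike WaitForBlockchainSync, this variant also pins the
        // exact height, which is useful after mining a known number of
        // blocks; ctx, alice and the target height are placeholders.
        //
        //	targetHeight := uint32(500)
        //	if err := alice.WaitForBlockHeight(ctx, targetHeight); err != nil {
        //		t.Fatalf("node never reached height %d: %v", targetHeight, err)
        //	}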
  1697  
  1698  // WaitForBalance waits until the node sees the expected confirmed/unconfirmed
  1699  // balance within their wallet.
  1700  func (hn *HarnessNode) WaitForBalance(expectedBalance dcrutil.Amount,
  1701  	confirmed bool) error {
  1702  
  1703  	req := &lnrpc.WalletBalanceRequest{}
  1704  
  1705  	var lastBalance dcrutil.Amount
  1706  	doesBalanceMatch := func() bool {
  1707  		balance, err := hn.rpc.LN.WalletBalance(hn.runCtx, req)
  1708  		if err != nil {
  1709  			return false
  1710  		}
  1711  
  1712  		if confirmed {
  1713  			lastBalance = dcrutil.Amount(balance.ConfirmedBalance)
  1714  			return dcrutil.Amount(balance.ConfirmedBalance) ==
  1715  				expectedBalance
  1716  		}
  1717  
  1718  		lastBalance = dcrutil.Amount(balance.UnconfirmedBalance)
  1719  		return dcrutil.Amount(balance.UnconfirmedBalance) ==
  1720  			expectedBalance
  1721  	}
  1722  
  1723  	err := wait.Predicate(doesBalanceMatch, DefaultTimeout)
  1724  	if err != nil {
  1725  		return fmt.Errorf("balances not synced after deadline: "+
  1726  			"expected %v, only have %v", expectedBalance,
  1727  			lastBalance)
  1728  	}
  1729  
  1730  	return nil
  1731  }
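
        // Added commentary (not part of the original file): a hypothetical
        // sketch. Amounts are in atoms, so 1e8 below stands for 1 DCR;
        // alice and t are placeholders.
        //
        //	// Wait until exactly 1 DCR shows up as confirmed balance.
        //	if err := alice.WaitForBalance(dcrutil.Amount(1e8), true); err != nil {
        //		t.Fatalf("balance never confirmed: %v", err)
        //	}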
  1732  
  1733  // PrintErr prints an error to the console.
  1734  func (hn *HarnessNode) PrintErr(format string, a ...interface{}) {
  1735  	fmt.Printf("itest error from [node:%s]: %s\n",
  1736  		hn.Cfg.Name, fmt.Sprintf(format, a...))
  1737  }
  1738  
  1739  // handleChannelEdgeUpdates takes a series of channel edge updates, extracts
  1740  // the outpoints, and saves them to harness node's internal state.
  1741  func (hn *HarnessNode) handleChannelEdgeUpdates(
  1742  	updates []*lnrpc.ChannelEdgeUpdate) {
  1743  
  1744  	// For each new channel, we'll increment the number of
  1745  	// edges seen by one.
  1746  	for _, newChan := range updates {
  1747  		op, err := MakeOutpoint(newChan.ChanPoint)
  1748  		if err != nil {
  1749  			hn.PrintErr("failed to create outpoint for %v "+
  1750  				"got err: %v", newChan.ChanPoint, err)
  1751  			return
  1752  		}
  1753  		hn.openChans[op]++
  1754  
  1755  		hn.LogPrintf("Got open chan update for %s (edges %d, watchers %d)",
  1756  			op, hn.openChans[op], len(hn.openChanWatchers[op]))
  1757  
  1758  		// If fewer than two edges have been seen for this channel, it
  1759  		// hasn't been fully announced yet; skip to the next update.
  1760  		if numEdges := hn.openChans[op]; numEdges < 2 {
  1761  			continue
  1762  		}
  1763  
  1764  		// Otherwise, we'll notify all the registered watchers and
  1765  		// remove the dispatched watchers.
  1766  		for _, eventChan := range hn.openChanWatchers[op] {
  1767  			close(eventChan)
  1768  		}
  1769  		delete(hn.openChanWatchers, op)
  1770  
  1771  		// Check whether there's a routing policy update. If so, save
  1772  		// it to the node state.
  1773  		if newChan.RoutingPolicy == nil {
  1774  			continue
  1775  		}
  1776  
  1777  		// Append the policy to the slice.
  1778  		node := newChan.AdvertisingNode
  1779  		policies := hn.policyUpdates[op.String()]
  1780  
  1781  		// If the map[op] is nil, we need to initialize the map first.
  1782  		if policies == nil {
  1783  			policies = make(map[string][]*lnrpc.RoutingPolicy)
  1784  		}
  1785  		policies[node] = append(
  1786  			policies[node], newChan.RoutingPolicy,
  1787  		)
  1788  		hn.policyUpdates[op.String()] = policies
  1789  	}
  1790  }
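
        // Added commentary (not part of the original file): a channel
        // announcement yields one ChannelEdgeUpdate per direction. The first
        // update for an outpoint brings openChans[op] to 1 and nothing
        // fires; the second brings it to 2, at which point every eventChan
        // registered in openChanWatchers[op] is closed, waking the
        // corresponding WaitForNetworkChannelOpen callers.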
  1791  
  1792  // handleOpenChannelWatchRequest processes a watch open channel request by
  1793  // checking the number of edges seen for the given channel point. If the
  1794  // number is at least 2, the channel is considered open. Otherwise, we
  1795  // attempt to find it in the node's channel graph. If neither check
  1796  // succeeds, the request is added to a watch request list that will be
  1797  // handled by handleChannelEdgeUpdates.
  1798  func (hn *HarnessNode) handleOpenChannelWatchRequest(req *chanWatchRequest) {
  1799  	targetChan := req.chanPoint
  1800  
  1801  	// If this is an open request, then it can be dispatched if the number
  1802  	// of edges seen for the channel is at least two.
  1803  	if numEdges := hn.openChans[targetChan]; numEdges >= 2 {
  1804  		hn.LogPrintf("Already have targetChan opened: %s", targetChan)
  1805  		close(req.eventChan)
  1806  		return
  1807  	}
  1808  
  1809  	// Before we add the channel to our set of open clients, we'll check to
  1810  	// see if the channel is already in the channel graph of the target
  1811  	// node. This lets us handle the case where a node has already seen a
  1812  	// channel before a notification has been requested, causing us to miss
  1813  	// it.
  1814  	chanFound := hn.checkChanPointInGraph(targetChan)
  1815  	if chanFound {
  1816  		hn.LogPrintf("Already have targetChan in graph: %s", targetChan)
  1817  		close(req.eventChan)
  1818  		return
  1819  	}
  1820  
  1821  	// Otherwise, we'll add this to the list of open channel watchers for
  1822  	// this out point.
  1823  	hn.openChanWatchers[targetChan] = append(
  1824  		hn.openChanWatchers[targetChan],
  1825  		req.eventChan,
  1826  	)
  1827  	hn.LogPrintf("Registered targetChan to wait for open: %s", targetChan)
  1828  }
  1829  
  1830  // handleClosedChannelUpdate takes a series of closed channel updates, extracts
  1831  // the outpoints, saves them to harness node's internal state, and notifies all
  1832  // registered clients.
  1833  func (hn *HarnessNode) handleClosedChannelUpdate(
  1834  	updates []*lnrpc.ClosedChannelUpdate) {
  1835  
  1836  	// For each channel closed, we'll mark that we've detected a channel
  1837  	// closure while lnd was pruning the channel graph.
  1838  	for _, closedChan := range updates {
  1839  		op, err := MakeOutpoint(closedChan.ChanPoint)
  1840  		if err != nil {
  1841  			hn.PrintErr("failed to create outpoint for %v "+
  1842  				"got err: %v", closedChan.ChanPoint, err)
  1843  			return
  1844  		}
  1845  
  1846  		hn.closedChans[op] = struct{}{}
  1847  
  1848  		// As the channel has been closed, we'll notify all
  1849  		// registered watchers.
  1850  		for _, eventChan := range hn.closeChanWatchers[op] {
  1851  			close(eventChan)
  1852  		}
  1853  		delete(hn.closeChanWatchers, op)
  1854  	}
  1855  }
  1856  
  1857  // handleCloseChannelWatchRequest processes a watch close channel request by
  1858  // checking whether the given channel point can be found in the node's internal
  1859  // state. If not, the request is added to a watch request list that will be
  1860  // handled by handleClosedChannelUpdate.
  1861  func (hn *HarnessNode) handleCloseChannelWatchRequest(req *chanWatchRequest) {
  1862  	targetChan := req.chanPoint
  1863  
  1864  	// If this is a close request, then it can be immediately dispatched if
  1865  	// we've already seen a channel closure for this channel.
  1866  	if _, ok := hn.closedChans[targetChan]; ok {
  1867  		close(req.eventChan)
  1868  		return
  1869  	}
  1870  
  1871  	// Otherwise, we'll add this to the list of close channel watchers for
  1872  	// this out point.
  1873  	hn.closeChanWatchers[targetChan] = append(
  1874  		hn.closeChanWatchers[targetChan],
  1875  		req.eventChan,
  1876  	)
  1877  }
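
        // Added commentary (not part of the original file): requests queued
        // here are dispatched later by handleClosedChannelUpdate once the
        // closing transaction confirms. Both handlers appear to run on the
        // same watcher goroutine that owns closedChans and
        // closeChanWatchers, which is why these maps need no locking.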
  1878  
  1879  type topologyClient lnrpc.Lightning_SubscribeChannelGraphClient
  1880  
  1881  // newTopologyClient creates a topology client.
  1882  func (hn *HarnessNode) newTopologyClient(
  1883  	ctx context.Context) (topologyClient, error) {
  1884  
  1885  	req := &lnrpc.GraphTopologySubscription{}
  1886  	client, err := hn.rpc.LN.SubscribeChannelGraph(ctx, req)
  1887  	if err != nil {
  1888  		return nil, fmt.Errorf("%s(%d): unable to create topology "+
  1889  			"client: %v (%s)", hn.Name(), hn.NodeID, err,
  1890  			time.Now().String())
  1891  	}
  1892  
  1893  	return client, nil
  1894  }
  1895  
  1896  // receiveTopologyClientStream initializes a topologyClient to subscribe to
  1897  // topology update events. Due to a race condition between the ChannelRouter
  1898  // starting and us making the subscription request, it's possible for our graph
  1899  // subscription to fail. In that case, we will retry the subscription until it
  1900  // succeeds, or fail once the default timeout has elapsed.
  1901  //
  1902  // NOTE: must be run as a goroutine.
  1903  func (hn *HarnessNode) receiveTopologyClientStream(
  1904  	receiver chan *lnrpc.GraphTopologyUpdate) error {
  1905  
  1906  	// Create a topology client to receive graph updates.
  1907  	client, err := hn.newTopologyClient(hn.runCtx)
  1908  	if err != nil {
  1909  		return fmt.Errorf("create topologyClient failed: %w", err)
  1910  	}
  1911  
  1912  	// We use the context to time out when retrying graph subscription.
  1913  	ctxt, cancel := context.WithTimeout(hn.runCtx, DefaultTimeout)
  1914  	defer cancel()
  1915  
  1916  	for {
  1917  		update, err := client.Recv()
  1918  
  1919  		switch {
  1920  		case err == nil:
  1921  			// Good case. We will send the update to the receiver.
  1922  
  1923  		case strings.Contains(err.Error(), "router not started"):
  1924  			// If the router hasn't been started, we will retry
  1925  			// every poll interval until it has been started or
  1926  			// fail once ctxt times out.
  1927  			select {
  1928  			case <-ctxt.Done():
  1929  				return fmt.Errorf("graph subscription: " +
  1930  					"router not started before timeout")
  1931  			case <-time.After(wait.PollInterval):
  1932  			case <-hn.runCtx.Done():
  1933  				return nil
  1934  			}
  1935  
  1936  			// Re-create the topology client.
  1937  			client, err = hn.newTopologyClient(hn.runCtx)
  1938  			if err != nil {
  1939  				return fmt.Errorf("create topologyClient "+
  1940  					"failed: %v", err)
  1941  			}
  1942  
  1943  			continue
  1944  
  1945  		case strings.Contains(err.Error(), "EOF"):
  1946  			// End of subscription stream. Do nothing and quit.
  1947  			return nil
  1948  
  1949  		case strings.Contains(err.Error(), context.Canceled.Error()):
  1950  			// End of subscription stream. Do nothing and quit.
  1951  			return nil
  1952  
  1953  		default:
  1954  			// An unexpected error is returned; return it and
  1955  			// leave it to be handled by the caller.
  1956  			return fmt.Errorf("graph subscription err: %w", err)
  1957  		}
  1958  
  1959  		// Send the update or quit.
  1960  		select {
  1961  		case receiver <- update:
  1962  		case <-hn.runCtx.Done():
  1963  			return nil
  1964  		}
  1965  	}
  1966  }
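
        // Added commentary (not part of the original file): a hypothetical
        // sketch of how this stream is typically driven, per the NOTE above
        // that it must run as a goroutine. The receiver channel feeds the
        // watcher's select loop shown earlier; the buffer size is a
        // placeholder.
        //
        //	updateChan := make(chan *lnrpc.GraphTopologyUpdate, 20)
        //	go func() {
        //		if err := hn.receiveTopologyClientStream(updateChan); err != nil {
        //			hn.PrintErr("topology stream: %v", err)
        //		}
        //	}()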
  1967  
  1968  // handlePolicyUpdateWatchRequest checks whether the expected policy can be
  1969  // found either in the node's internal state or in its DescribeGraph
  1970  // response. If found, it signals the request by closing the event channel.
  1971  // Otherwise it does nothing.
  1972  func (hn *HarnessNode) handlePolicyUpdateWatchRequest(req *chanWatchRequest) {
  1973  	op := req.chanPoint
  1974  
  1975  	// Get a list of known policies for this chanPoint+advertisingNode
  1976  	// combination. Start searching in the node state first.
  1977  	policies, ok := hn.policyUpdates[op.String()][req.advertisingNode]
  1978  
  1979  	if !ok {
  1980  		// If it cannot be found in the node state, try searching it
  1981  		// from the node's DescribeGraph.
  1982  		policyMap := hn.getChannelPolicies(req.includeUnannounced)
  1983  		policies, ok = policyMap[op.String()][req.advertisingNode]
  1984  		if !ok {
  1985  			return
  1986  		}
  1987  	}
  1988  
  1989  	// Check if there's a matched policy.
  1990  	for _, policy := range policies {
  1991  		if CheckChannelPolicy(policy, req.policy) == nil {
  1992  			close(req.eventChan)
  1993  			return
  1994  		}
  1995  	}
  1996  }
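
        // Added commentary (not part of the original file): unlike the
        // open/close watchers, an unmatched policy request is not queued
        // here. Instead, WaitForChannelPolicyUpdate re-sends the request on
        // every poll tick, so this handler is re-run until a matching
        // policy shows up or the caller times out.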
  1997  
  1998  // getChannelPolicies queries the channel graph and formats the policies into
  1999  // the format defined in type policyUpdateMap.
  2000  func (hn *HarnessNode) getChannelPolicies(include bool) policyUpdateMap {
  2001  
  2002  	ctxt, cancel := context.WithTimeout(hn.runCtx, DefaultTimeout)
  2003  	defer cancel()
  2004  
  2005  	graph, err := hn.rpc.LN.DescribeGraph(ctxt, &lnrpc.ChannelGraphRequest{
  2006  		IncludeUnannounced: include,
  2007  	})
  2008  	if err != nil {
  2009  		hn.PrintErr("DescribeGraph got err: %v", err)
  2010  		return nil
  2011  	}
  2012  
  2013  	policyUpdates := policyUpdateMap{}
  2014  
  2015  	for _, e := range graph.Edges {
  2016  
  2017  		policies := policyUpdates[e.ChanPoint]
  2018  
  2019  		// If the map[op] is nil, we need to initialize the map first.
  2020  		if policies == nil {
  2021  			policies = make(map[string][]*lnrpc.RoutingPolicy)
  2022  		}
  2023  
  2024  		if e.Node1Policy != nil {
  2025  			policies[e.Node1Pub] = append(
  2026  				policies[e.Node1Pub], e.Node1Policy,
  2027  			)
  2028  		}
  2029  
  2030  		if e.Node2Policy != nil {
  2031  			policies[e.Node2Pub] = append(
  2032  				policies[e.Node2Pub], e.Node2Policy,
  2033  			)
  2034  		}
  2035  
  2036  		policyUpdates[e.ChanPoint] = policies
  2037  	}
  2038  
  2039  	return policyUpdates
  2040  }
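
        // Added commentary (not part of the original file): the returned
        // policyUpdateMap is keyed first by channel point, then by the
        // advertising node's pubkey. A hypothetical lookup, with placeholder
        // keys:
        //
        //	policies := hn.getChannelPolicies(false)
        //	forNode := policies["<txid>:0"]["<node pubkey>"]
        //	// forNode is a []*lnrpc.RoutingPolicy.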
  2041  
  2042  // renameFile is a helper to rename (log) files created during integration
  2043  // tests.
  2044  func renameFile(fromFileName, toFileName string) {
  2045  	err := os.Rename(fromFileName, toFileName)
  2046  	if err != nil {
  2047  		fmt.Printf("could not rename %s to %s: %v\n",
  2048  			fromFileName, toFileName, err)
  2049  	}
  2050  }
  2051  
  2052  // getFinalizedLogFilePrefix returns the prefix of the finalized log filename.
  2053  func getFinalizedLogFilePrefix(hn *HarnessNode) string {
  2054  	pubKeyHex := hex.EncodeToString(
  2055  		hn.PubKey[:logPubKeyBytes],
  2056  	)
  2057  
  2058  	return fmt.Sprintf("%s/%0.3d-%s-%s-%s",
  2059  		GetLogDir(), hn.NodeID, hn.Cfg.Name,
  2060  		hn.Cfg.LogFilenamePrefix,
  2061  		pubKeyHex)
  2062  }
  2063  
  2064  // finalizeLogfile closes the node's log file and, if log output is enabled,
  2065  // renames it to its final name, which includes part of the node's PubKey.
  2066  func finalizeLogfile(hn *HarnessNode, fileName string) {
  2067  	if hn.logFile != nil {
  2068  		hn.logFile.Close()
  2069  
  2070  		// If logoutput flag is not set, return early.
  2071  		if !*logOutput {
  2072  			return
  2073  		}
  2074  
  2075  		newFileName := fmt.Sprintf("%v.log",
  2076  			getFinalizedLogFilePrefix(hn),
  2077  		)
  2078  
  2079  		renameFile(fileName, newFileName)
  2080  	}
  2081  }
  2082  
  2083  func finalizeEtcdLog(hn *HarnessNode) {
  2084  	if hn.Cfg.DbBackend != BackendEtcd {
  2085  		return
  2086  	}
  2087  
  2088  	etcdLogFileName := fmt.Sprintf("%s/etcd.log", hn.Cfg.LogDir)
  2089  	newEtcdLogFileName := fmt.Sprintf("%v-etcd.log",
  2090  		getFinalizedLogFilePrefix(hn),
  2091  	)
  2092  
  2093  	renameFile(etcdLogFileName, newEtcdLogFileName)
  2094  }
  2095  
  2096  func addLogFile(hn *HarnessNode) (string, error) {
  2097  	var fileName string
  2098  
  2099  	dir := GetLogDir()
  2100  	fileName = fmt.Sprintf("%s/%0.3d-%s-%s-%s.log", dir, hn.NodeID,
  2101  		hn.Cfg.Name, hn.Cfg.LogFilenamePrefix,
  2102  		hex.EncodeToString(hn.PubKey[:logPubKeyBytes]))
  2103  
  2104  	// If the node's PubKey is not yet initialized, create a
  2105  	// temporary file name. Later, after the PubKey has been
  2106  	// initialized, the file can be moved to its final name with
  2107  	// the PubKey included.
  2108  	if bytes.Equal(hn.PubKey[:4], []byte{0, 0, 0, 0}) {
  2109  		fileName = fmt.Sprintf("%s/%0.3d-%s-%s-tmp__.log", dir,
  2110  			hn.NodeID, hn.Cfg.Name, hn.Cfg.LogFilenamePrefix)
  2111  	}
  2112  
  2113  	// Create file if not exists, otherwise append.
  2114  	file, err := os.OpenFile(fileName,
  2115  		os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666)
  2116  	if err != nil {
  2117  		return fileName, err
  2118  	}
  2119  
  2120  	// Pass the node's stderr to both errb and the file.
  2121  	w := io.MultiWriter(hn.cmd.Stderr, file)
  2122  	hn.cmd.Stderr = w
  2123  
  2124  	// Pass the node's stdout only to the file.
  2125  	hn.cmd.Stdout = file
  2126  
  2127  	// Let the node keep a reference to this file, such
  2128  	// that we can add to it if necessary.
  2129  	hn.logFile = file
  2130  
  2131  	return fileName, nil
  2132  }
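
        // Added commentary (not part of the original file): a hypothetical
        // sketch of the log file lifecycle. addLogFile must run before the
        // node process is started so that stdout/stderr are wired up;
        // finalizeLogfile is called after shutdown to rename the temporary
        // file to its final, PubKey-tagged name.
        //
        //	fileName, err := addLogFile(hn)
        //	if err != nil {
        //		return err
        //	}
        //	if err := hn.cmd.Start(); err != nil {
        //		return err
        //	}
        //	// ... run the test, stop the node ...
        //	finalizeLogfile(hn, fileName)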