github.com/decred/dcrlnd@v0.7.6/rpcserver.go

     1  package dcrlnd
     2  
     3  import (
     4  	"bytes"
     5  	"context"
     6  	"encoding/hex"
     7  	"errors"
     8  	"fmt"
     9  	"io"
    10  	"math"
    11  	"net/http"
    12  	"runtime"
    13  	"sort"
    14  	"strconv"
    15  	"strings"
    16  	"sync"
    17  	"sync/atomic"
    18  	"time"
    19  
    20  	"decred.org/dcrwallet/v4/wallet/txauthor"
    21  	"github.com/davecgh/go-spew/spew"
    22  	"github.com/decred/dcrd/chaincfg/chainhash"
    23  	"github.com/decred/dcrd/chaincfg/v3"
    24  	"github.com/decred/dcrd/dcrec/secp256k1/v4"
    25  	"github.com/decred/dcrd/dcrec/secp256k1/v4/ecdsa"
    26  	"github.com/decred/dcrd/dcrutil/v4"
    27  	"github.com/decred/dcrd/txscript/v4/stdaddr"
    28  	"github.com/decred/dcrd/txscript/v4/stdscript"
    29  	"github.com/decred/dcrd/wire"
    30  	"github.com/decred/dcrlnd/autopilot"
    31  	"github.com/decred/dcrlnd/build"
    32  	"github.com/decred/dcrlnd/chainreg"
    33  	"github.com/decred/dcrlnd/chanacceptor"
    34  	"github.com/decred/dcrlnd/chanbackup"
    35  	"github.com/decred/dcrlnd/chanfitness"
    36  	"github.com/decred/dcrlnd/channeldb"
    37  	"github.com/decred/dcrlnd/channelnotifier"
    38  	"github.com/decred/dcrlnd/contractcourt"
    39  	"github.com/decred/dcrlnd/discovery"
    40  	"github.com/decred/dcrlnd/feature"
    41  	"github.com/decred/dcrlnd/funding"
    42  	"github.com/decred/dcrlnd/htlcswitch"
    43  	"github.com/decred/dcrlnd/htlcswitch/hop"
    44  	"github.com/decred/dcrlnd/input"
    45  	"github.com/decred/dcrlnd/internal/psbt"
    46  	"github.com/decred/dcrlnd/invoices"
    47  	"github.com/decred/dcrlnd/keychain"
    48  	"github.com/decred/dcrlnd/kvdb"
    49  	"github.com/decred/dcrlnd/labels"
    50  	"github.com/decred/dcrlnd/lncfg"
    51  	"github.com/decred/dcrlnd/lnrpc"
    52  	"github.com/decred/dcrlnd/lnrpc/invoicesrpc"
    53  	"github.com/decred/dcrlnd/lnrpc/routerrpc"
    54  	"github.com/decred/dcrlnd/lnrpc/walletrpc"
    55  	"github.com/decred/dcrlnd/lntypes"
    56  	"github.com/decred/dcrlnd/lnwallet"
    57  	"github.com/decred/dcrlnd/lnwallet/chainfee"
    58  	"github.com/decred/dcrlnd/lnwallet/chancloser"
    59  	"github.com/decred/dcrlnd/lnwallet/chanfunding"
    60  	"github.com/decred/dcrlnd/lnwallet/dcrwallet"
    61  	"github.com/decred/dcrlnd/lnwire"
    62  	"github.com/decred/dcrlnd/macaroons"
    63  	"github.com/decred/dcrlnd/peer"
    64  	"github.com/decred/dcrlnd/peernotifier"
    65  	"github.com/decred/dcrlnd/record"
    66  	"github.com/decred/dcrlnd/routing"
    67  	"github.com/decred/dcrlnd/routing/route"
    68  	"github.com/decred/dcrlnd/rpcperms"
    69  	"github.com/decred/dcrlnd/signal"
    70  	"github.com/decred/dcrlnd/sweep"
    71  	"github.com/decred/dcrlnd/watchtower"
    72  	"github.com/decred/dcrlnd/zpay32"
    73  
    74  	proxy "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
    75  	"github.com/tv42/zbase32"
    76  	"google.golang.org/grpc"
    77  	"google.golang.org/grpc/codes"
    78  	"google.golang.org/grpc/status"
    79  	"gopkg.in/macaroon-bakery.v2/bakery"
    80  )
    81  
    82  const (
    83  	// maxDcrPaymentMAtoms is the maximum allowed Decred payment currently
    84  	// permitted as defined in BOLT-0002. This is the same as the maximum
    85  	// channel size.
    86  	maxDcrPaymentMAtoms = lnwire.MilliAtom(funding.MaxDecredFundingAmount * 1000)
    87  )
    88  
    89  var (
    90  	// MaxPaymentMAtoms is the maximum allowed payment currently permitted
    91  	// as defined in BOLT-0002. This value depends on which chain is active.
    92  	// It defaults to the value for the Decred chain.
    93  	MaxPaymentMAtoms = maxDcrPaymentMAtoms
    94  
    95  	// readPermissions is a slice of all entities that allow read
    96  	// permissions for authorization purposes, all lowercase.
    97  	readPermissions = []bakery.Op{
    98  		{
    99  			Entity: "onchain",
   100  			Action: "read",
   101  		},
   102  		{
   103  			Entity: "offchain",
   104  			Action: "read",
   105  		},
   106  		{
   107  			Entity: "address",
   108  			Action: "read",
   109  		},
   110  		{
   111  			Entity: "message",
   112  			Action: "read",
   113  		},
   114  		{
   115  			Entity: "peers",
   116  			Action: "read",
   117  		},
   118  		{
   119  			Entity: "info",
   120  			Action: "read",
   121  		},
   122  		{
   123  			Entity: "invoices",
   124  			Action: "read",
   125  		},
   126  		{
   127  			Entity: "signer",
   128  			Action: "read",
   129  		},
   130  		{
   131  			Entity: "macaroon",
   132  			Action: "read",
   133  		},
   134  	}
   135  
   136  	// writePermissions is a slice of all entities that allow write
   137  	// permissions for authorization purposes, all lowercase.
   138  	writePermissions = []bakery.Op{
   139  		{
   140  			Entity: "onchain",
   141  			Action: "write",
   142  		},
   143  		{
   144  			Entity: "offchain",
   145  			Action: "write",
   146  		},
   147  		{
   148  			Entity: "address",
   149  			Action: "write",
   150  		},
   151  		{
   152  			Entity: "message",
   153  			Action: "write",
   154  		},
   155  		{
   156  			Entity: "peers",
   157  			Action: "write",
   158  		},
   159  		{
   160  			Entity: "info",
   161  			Action: "write",
   162  		},
   163  		{
   164  			Entity: "invoices",
   165  			Action: "write",
   166  		},
   167  		{
   168  			Entity: "signer",
   169  			Action: "generate",
   170  		},
   171  		{
   172  			Entity: "macaroon",
   173  			Action: "generate",
   174  		},
   175  		{
   176  			Entity: "macaroon",
   177  			Action: "write",
   178  		},
   179  	}
   180  
   181  	// invoicePermissions is a slice of all the entities that allow a user
   182  	// to access only calls that are related to invoices: streaming
   183  	// RPCs, generating invoices, and listing invoices.
   184  	invoicePermissions = []bakery.Op{
   185  		{
   186  			Entity: "invoices",
   187  			Action: "read",
   188  		},
   189  		{
   190  			Entity: "invoices",
   191  			Action: "write",
   192  		},
   193  		{
   194  			Entity: "address",
   195  			Action: "read",
   196  		},
   197  		{
   198  			Entity: "address",
   199  			Action: "write",
   200  		},
   201  		{
   202  			Entity: "onchain",
   203  			Action: "read",
   204  		},
   205  	}
   206  
   207  	// TODO(guggero): Refactor into constants that are used for all
   208  	// permissions in this file. Also expose the list of possible
   209  	// permissions in an RPC when per RPC permissions are
   210  	// implemented.
   211  	validActions  = []string{"read", "write", "generate"}
   212  	validEntities = []string{
   213  		"onchain", "offchain", "address", "message",
   214  		"peers", "info", "invoices", "signer", "macaroon",
   215  		macaroons.PermissionEntityCustomURI,
   216  	}
   217  
   218  	// If the --no-macaroons flag is used to start lnd, the macaroon service
   219  	// is not initialized. errMacaroonDisabled is then returned when
   220  	// macaroon related services are used.
   221  	errMacaroonDisabled = fmt.Errorf("macaroon authentication disabled, " +
   222  		"remove --no-macaroons flag to enable")
   223  )
   224  
   225  // stringInSlice returns true if a string is contained in the given slice.
   226  func stringInSlice(a string, slice []string) bool {
   227  	for _, b := range slice {
   228  		if b == a {
   229  			return true
   230  		}
   231  	}
   232  	return false
   233  }
   234  
   235  // calculateFeeRate uses the atomsPerByte value from a request to calculate the
   236  // fee rate. This is a port from the upstream code to ease the porting effort.
   237  func calculateFeeRate(atomsPerByte uint64, targetConf uint32,
   238  	estimator chainfee.Estimator) (chainfee.AtomPerKByte, error) {
   239  
   240  	// Convert the requested atoms/byte rate into atoms/KB.
   241  	atomsPerKb := chainfee.AtomPerKByte(atomsPerByte * 1000)
   242  
   243  	// Based on the passed fee related parameters, we'll determine an
   244  	// appropriate fee rate for this transaction.
   245  	return sweep.DetermineFeePerKB(
   246  		estimator, sweep.FeePreference{
   247  			ConfTarget: targetConf,
   248  			FeeRate:    atomsPerKb,
   249  		},
   250  	)
   251  }
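
        // exampleFeeRateSelection is an illustrative sketch (not part of the
        // original file) of how calculateFeeRate is meant to be driven: a request
        // supplies either an explicit atoms/byte rate, which is scaled by 1000 into
        // the atoms/KB unit used internally, or a confirmation target, which is
        // resolved through the estimator by sweep.DetermineFeePerKB. The function
        // name and the literal values are hypothetical.
        func exampleFeeRateSelection(estimator chainfee.Estimator) error {
        	// Explicit rate: 10 atoms/byte is handed down as 10,000 atoms/KB.
        	explicit, err := calculateFeeRate(10, 0, estimator)
        	if err != nil {
        		return err
        	}

        	// Conf target only: the estimator picks a rate aiming at 6 confirmations.
        	estimated, err := calculateFeeRate(0, 6, estimator)
        	if err != nil {
        		return err
        	}

        	rpcsLog.Debugf("explicit=%v estimated=%v", explicit, estimated)
        	return nil
        }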
   252  
   253  // GetAllPermissions returns all the permissions required to interact with lnd.
   254  func GetAllPermissions() []bakery.Op {
   255  	allPerms := make([]bakery.Op, 0)
   256  
   257  	// The map will help keep track of which specific permission pairs have
   258  	// already been added to the slice.
   259  	allPermsMap := make(map[string]map[string]struct{})
   260  
   261  	for _, perms := range MainRPCServerPermissions() {
   262  		for _, perm := range perms {
   263  			entity := perm.Entity
   264  			action := perm.Action
   265  
   266  			// If this specific entity-action permission pair isn't
   267  			// in the map yet, add it to the map and to the
   268  			// permission slice.
   269  			if acts, ok := allPermsMap[entity]; ok {
   270  				if _, ok := acts[action]; !ok {
   271  					allPermsMap[entity][action] = struct{}{}
   272  
   273  					allPerms = append(
   274  						allPerms, perm,
   275  					)
   276  				}
   277  
   278  			} else {
   279  				allPermsMap[entity] = make(map[string]struct{})
   280  				allPermsMap[entity][action] = struct{}{}
   281  				allPerms = append(allPerms, perm)
   282  			}
   283  		}
   284  	}
   285  
   286  	return allPerms
   287  }
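
        // exampleRequiredPermissions is an illustrative sketch (not part of the
        // original file): it shows how the map returned by MainRPCServerPermissions
        // can be consulted for the entity/action pairs a caller's macaroon must
        // carry for a given URI. The function name is hypothetical.
        func exampleRequiredPermissions() []bakery.Op {
        	// OpenChannel needs both an on-chain and an off-chain write
        	// permission, so this returns two operations.
        	return MainRPCServerPermissions()["/lnrpc.Lightning/OpenChannel"]
        }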
   288  
   289  // MainRPCServerPermissions returns a mapping of the main RPC server calls to
   290  // the permissions they require.
   291  func MainRPCServerPermissions() map[string][]bakery.Op {
   292  	return map[string][]bakery.Op{
   293  		"/lnrpc.Lightning/SendCoins": {{
   294  			Entity: "onchain",
   295  			Action: "write",
   296  		}},
   297  		"/lnrpc.Lightning/ListUnspent": {{
   298  			Entity: "onchain",
   299  			Action: "read",
   300  		}},
   301  		"/lnrpc.Lightning/SendMany": {{
   302  			Entity: "onchain",
   303  			Action: "write",
   304  		}},
   305  		"/lnrpc.Lightning/NewAddress": {{
   306  			Entity: "address",
   307  			Action: "write",
   308  		}},
   309  		"/lnrpc.Lightning/SignMessage": {{
   310  			Entity: "message",
   311  			Action: "write",
   312  		}},
   313  		"/lnrpc.Lightning/VerifyMessage": {{
   314  			Entity: "message",
   315  			Action: "read",
   316  		}},
   317  		"/lnrpc.Lightning/ConnectPeer": {{
   318  			Entity: "peers",
   319  			Action: "write",
   320  		}},
   321  		"/lnrpc.Lightning/DisconnectPeer": {{
   322  			Entity: "peers",
   323  			Action: "write",
   324  		}},
   325  		"/lnrpc.Lightning/OpenChannel": {{
   326  			Entity: "onchain",
   327  			Action: "write",
   328  		}, {
   329  			Entity: "offchain",
   330  			Action: "write",
   331  		}},
   332  		"/lnrpc.Lightning/BatchOpenChannel": {{
   333  			Entity: "onchain",
   334  			Action: "write",
   335  		}, {
   336  			Entity: "offchain",
   337  			Action: "write",
   338  		}},
   339  		"/lnrpc.Lightning/OpenChannelSync": {{
   340  			Entity: "onchain",
   341  			Action: "write",
   342  		}, {
   343  			Entity: "offchain",
   344  			Action: "write",
   345  		}},
   346  		"/lnrpc.Lightning/CloseChannel": {{
   347  			Entity: "onchain",
   348  			Action: "write",
   349  		}, {
   350  			Entity: "offchain",
   351  			Action: "write",
   352  		}},
   353  		"/lnrpc.Lightning/AbandonChannel": {{
   354  			Entity: "offchain",
   355  			Action: "write",
   356  		}},
   357  		"/lnrpc.Lightning/GetInfo": {{
   358  			Entity: "info",
   359  			Action: "read",
   360  		}},
   361  		"/lnrpc.Lightning/GetRecoveryInfo": {{
   362  			Entity: "info",
   363  			Action: "read",
   364  		}},
   365  		"/lnrpc.Lightning/ListPeers": {{
   366  			Entity: "peers",
   367  			Action: "read",
   368  		}},
   369  		"/lnrpc.Lightning/WalletBalance": {{
   370  			Entity: "onchain",
   371  			Action: "read",
   372  		}},
   373  		"/lnrpc.Lightning/EstimateFee": {{
   374  			Entity: "onchain",
   375  			Action: "read",
   376  		}},
   377  		"/lnrpc.Lightning/ChannelBalance": {{
   378  			Entity: "offchain",
   379  			Action: "read",
   380  		}},
   381  		"/lnrpc.Lightning/PendingChannels": {{
   382  			Entity: "offchain",
   383  			Action: "read",
   384  		}},
   385  		"/lnrpc.Lightning/ListChannels": {{
   386  			Entity: "offchain",
   387  			Action: "read",
   388  		}},
   389  		"/lnrpc.Lightning/SubscribeChannelEvents": {{
   390  			Entity: "offchain",
   391  			Action: "read",
   392  		}},
   393  		"/lnrpc.Lightning/ClosedChannels": {{
   394  			Entity: "offchain",
   395  			Action: "read",
   396  		}},
   397  		"/lnrpc.Lightning/SendPayment": {{
   398  			Entity: "offchain",
   399  			Action: "write",
   400  		}},
   401  		"/lnrpc.Lightning/SendPaymentSync": {{
   402  			Entity: "offchain",
   403  			Action: "write",
   404  		}},
   405  		"/lnrpc.Lightning/SendToRoute": {{
   406  			Entity: "offchain",
   407  			Action: "write",
   408  		}},
   409  		"/lnrpc.Lightning/SendToRouteSync": {{
   410  			Entity: "offchain",
   411  			Action: "write",
   412  		}},
   413  		"/lnrpc.Lightning/AddInvoice": {{
   414  			Entity: "invoices",
   415  			Action: "write",
   416  		}},
   417  		"/lnrpc.Lightning/LookupInvoice": {{
   418  			Entity: "invoices",
   419  			Action: "read",
   420  		}},
   421  		"/lnrpc.Lightning/ListInvoices": {{
   422  			Entity: "invoices",
   423  			Action: "read",
   424  		}},
   425  		"/lnrpc.Lightning/SubscribeInvoices": {{
   426  			Entity: "invoices",
   427  			Action: "read",
   428  		}},
   429  		"/lnrpc.Lightning/SubscribeTransactions": {{
   430  			Entity: "onchain",
   431  			Action: "read",
   432  		}},
   433  		"/lnrpc.Lightning/GetTransactions": {{
   434  			Entity: "onchain",
   435  			Action: "read",
   436  		}},
   437  		"/lnrpc.Lightning/DescribeGraph": {{
   438  			Entity: "info",
   439  			Action: "read",
   440  		}},
   441  		"/lnrpc.Lightning/GetNodeMetrics": {{
   442  			Entity: "info",
   443  			Action: "read",
   444  		}},
   445  		"/lnrpc.Lightning/GetChanInfo": {{
   446  			Entity: "info",
   447  			Action: "read",
   448  		}},
   449  		"/lnrpc.Lightning/GetNodeInfo": {{
   450  			Entity: "info",
   451  			Action: "read",
   452  		}},
   453  		"/lnrpc.Lightning/EnforceNodePing": {{
   454  			Entity: "peers",
   455  			Action: "write",
   456  		}},
   457  		"/lnrpc.Lightning/QueryRoutes": {{
   458  			Entity: "info",
   459  			Action: "read",
   460  		}},
   461  		"/lnrpc.Lightning/GetNetworkInfo": {{
   462  			Entity: "info",
   463  			Action: "read",
   464  		}},
   465  		"/lnrpc.Lightning/StopDaemon": {{
   466  			Entity: "info",
   467  			Action: "write",
   468  		}},
   469  		"/lnrpc.Lightning/SubscribeChannelGraph": {{
   470  			Entity: "info",
   471  			Action: "read",
   472  		}},
   473  		"/lnrpc.Lightning/ListPayments": {{
   474  			Entity: "offchain",
   475  			Action: "read",
   476  		}},
   477  		"/lnrpc.Lightning/DeletePayment": {{
   478  			Entity: "offchain",
   479  			Action: "write",
   480  		}},
   481  		"/lnrpc.Lightning/DeleteAllPayments": {{
   482  			Entity: "offchain",
   483  			Action: "write",
   484  		}},
   485  		"/lnrpc.Lightning/DebugLevel": {{
   486  			Entity: "info",
   487  			Action: "write",
   488  		}},
   489  		"/lnrpc.Lightning/CalcPaymentStats": {{
   490  			Entity: "info",
   491  			Action: "write",
   492  		}},
   493  		"/lnrpc.Lightning/DecodePayReq": {{
   494  			Entity: "offchain",
   495  			Action: "read",
   496  		}},
   497  		"/lnrpc.Lightning/FeeReport": {{
   498  			Entity: "offchain",
   499  			Action: "read",
   500  		}},
   501  		"/lnrpc.Lightning/UpdateChannelPolicy": {{
   502  			Entity: "offchain",
   503  			Action: "write",
   504  		}},
   505  		"/lnrpc.Lightning/ForwardingHistory": {{
   506  			Entity: "offchain",
   507  			Action: "read",
   508  		}},
   509  		"/lnrpc.Lightning/RestoreChannelBackups": {{
   510  			Entity: "offchain",
   511  			Action: "write",
   512  		}},
   513  		"/lnrpc.Lightning/ExportChannelBackup": {{
   514  			Entity: "offchain",
   515  			Action: "read",
   516  		}},
   517  		"/lnrpc.Lightning/VerifyChanBackup": {{
   518  			Entity: "offchain",
   519  			Action: "read",
   520  		}},
   521  		"/lnrpc.Lightning/ExportAllChannelBackups": {{
   522  			Entity: "offchain",
   523  			Action: "read",
   524  		}},
   525  		"/lnrpc.Lightning/SubscribeChannelBackups": {{
   526  			Entity: "offchain",
   527  			Action: "read",
   528  		}},
   529  		"/lnrpc.Lightning/ChannelAcceptor": {{
   530  			Entity: "onchain",
   531  			Action: "write",
   532  		}, {
   533  			Entity: "offchain",
   534  			Action: "write",
   535  		}},
   536  		"/lnrpc.Lightning/BakeMacaroon": {{
   537  			Entity: "macaroon",
   538  			Action: "generate",
   539  		}},
   540  		"/lnrpc.Lightning/ListMacaroonIDs": {{
   541  			Entity: "macaroon",
   542  			Action: "read",
   543  		}},
   544  		"/lnrpc.Lightning/DeleteMacaroonID": {{
   545  			Entity: "macaroon",
   546  			Action: "write",
   547  		}},
   548  		"/lnrpc.Lightning/ListPermissions": {{
   549  			Entity: "info",
   550  			Action: "read",
   551  		}},
   552  		"/lnrpc.Lightning/CheckMacaroonPermissions": {{
   553  			Entity: "macaroon",
   554  			Action: "read",
   555  		}},
   556  		"/lnrpc.Lightning/SubscribePeerEvents": {{
   557  			Entity: "peers",
   558  			Action: "read",
   559  		}},
   560  		"/lnrpc.Lightning/FundingStateStep": {{
   561  			Entity: "onchain",
   562  			Action: "write",
   563  		}, {
   564  			Entity: "offchain",
   565  			Action: "write",
   566  		}},
   567  		lnrpc.RegisterRPCMiddlewareURI: {{
   568  			Entity: "macaroon",
   569  			Action: "write",
   570  		}},
   571  		"/lnrpc.Lightning/SendCustomMessage": {{
   572  			Entity: "offchain",
   573  			Action: "write",
   574  		}},
   575  		"/lnrpc.Lightning/SubscribeCustomMessages": {{
   576  			Entity: "offchain",
   577  			Action: "read",
   578  		}},
   579  	}
   580  }
   581  
   582  // rpcServer is a gRPC, RPC front end to the lnd daemon.
   583  // TODO(roasbeef): pagination support for the list-style calls
   584  type rpcServer struct {
   585  	started  int32 // To be used atomically.
   586  	shutdown int32 // To be used atomically.
   587  
   588  	// Required by the grpc-gateway/v2 library for forward compatibility.
   589  	// Must be after the atomically used variables to not break struct
   590  	// alignment.
   591  	lnrpc.UnimplementedLightningServer
   592  
   593  	server *server
   594  
   595  	cfg *Config
   596  
   597  	// subServers are a set of sub-RPC servers that use the same gRPC and
   598  	// listening sockets as the main RPC server, but which maintain their
   599  	// own independent service. This allows us to expose a set of
   600  	// micro-service like abstractions to the outside world for users to
   601  	// consume.
   602  	subServers      []lnrpc.SubServer
   603  	subGrpcHandlers []lnrpc.GrpcHandler
   604  
   605  	// routerBackend contains the backend implementation of the router
   606  	// rpc sub server.
   607  	routerBackend *routerrpc.RouterBackend
   608  
   609  	// chanPredicate is used in the bidirectional ChannelAcceptor streaming
   610  	// method.
   611  	chanPredicate *chanacceptor.ChainedAcceptor
   612  
   613  	quit chan struct{}
   614  
   615  	// macService is the macaroon service that we need to mint new
   616  	// macaroons.
   617  	macService *macaroons.Service
   618  
   619  	// selfNode is our own pubkey.
   620  	selfNode route.Vertex
   621  
   622  	// interceptorChain is the interceptor chain added to our gRPC server.
   623  	interceptorChain *rpcperms.InterceptorChain
   624  
   625  	// implCfg is the configuration for some of the interfaces that can be
   626  	// provided externally.
   627  	implCfg *ImplementationCfg
   628  
   629  	// interceptor is used to be able to request a shutdown.
   630  	interceptor signal.Interceptor
   631  
   632  	graphCache        sync.RWMutex
   633  	describeGraphResp *lnrpc.ChannelGraph
   634  	graphCacheEvictor *time.Timer
   635  }
   636  
   637  // A compile time check to ensure that rpcServer fully implements the
   638  // LightningServer gRPC service.
   639  var _ lnrpc.LightningServer = (*rpcServer)(nil)
   640  
   641  // newRPCServer creates and returns a new instance of the rpcServer. Before
   642  // dependencies are added, this will be a non-functioning RPC server only to
   643  // be used to register the LightningService with the gRPC server.
   644  func newRPCServer(cfg *Config, interceptorChain *rpcperms.InterceptorChain,
   645  	implCfg *ImplementationCfg, interceptor signal.Interceptor) *rpcServer {
   646  
   647  	// We go through the list of registered sub-servers, and create a gRPC
   648  	// handler for each. These are used to register with the gRPC server
   649  	// before all dependencies are available.
   650  	registeredSubServers := lnrpc.RegisteredSubServers()
   651  
   652  	var subServerHandlers []lnrpc.GrpcHandler
   653  	for _, subServer := range registeredSubServers {
   654  		subServerHandlers = append(
   655  			subServerHandlers, subServer.NewGrpcHandler(),
   656  		)
   657  	}
   658  
   659  	return &rpcServer{
   660  		cfg:              cfg,
   661  		subGrpcHandlers:  subServerHandlers,
   662  		interceptorChain: interceptorChain,
   663  		implCfg:          implCfg,
   664  		quit:             make(chan struct{}, 1),
   665  		interceptor:      interceptor,
   666  	}
   667  }
   668  
   669  // addDeps populates all dependencies needed by the RPC server, and any
   670  // of the sub-servers that it maintains. When this is done, the RPC server can
   671  // be started, and start accepting RPC calls.
   672  func (r *rpcServer) addDeps(s *server, macService *macaroons.Service,
   673  	subServerCgs *subRPCServerConfigs, atpl *autopilot.Manager,
   674  	invoiceRegistry *invoices.InvoiceRegistry, tower *watchtower.Standalone,
   675  	chanPredicate *chanacceptor.ChainedAcceptor) error {
   676  
   677  	// Set up router rpc backend.
   678  	selfNode, err := s.graphDB.SourceNode()
   679  	if err != nil {
   680  		return err
   681  	}
   682  	graph := s.graphDB
   683  	routerBackend := &routerrpc.RouterBackend{
   684  		MaxPaymentMAtoms: MaxPaymentMAtoms,
   685  		SelfNode:         selfNode.PubKeyBytes,
   686  		FetchChannelCapacity: func(chanID uint64) (dcrutil.Amount,
   687  			error) {
   688  
   689  			info, _, _, err := graph.FetchChannelEdgesByID(chanID)
   690  			if err != nil {
   691  				return 0, err
   692  			}
   693  			return info.Capacity, nil
   694  		},
   695  		FetchChannelEndpoints: func(chanID uint64) (route.Vertex,
   696  			route.Vertex, error) {
   697  
   698  			info, _, _, err := graph.FetchChannelEdgesByID(
   699  				chanID,
   700  			)
   701  			if err != nil {
   702  				return route.Vertex{}, route.Vertex{},
   703  					fmt.Errorf("unable to fetch channel "+
   704  						"edges by channel ID %d: %v",
   705  						chanID, err)
   706  			}
   707  
   708  			return info.NodeKey1Bytes, info.NodeKey2Bytes, nil
   709  		},
   710  		FindRoute:              s.chanRouter.FindRoute,
   711  		MissionControl:         s.missionControl,
   712  		ActiveNetParams:        r.cfg.ActiveNetParams.Params,
   713  		Tower:                  s.controlTower,
   714  		MaxTotalTimelock:       r.cfg.MaxOutgoingCltvExpiry,
   715  		DefaultFinalCltvDelta:  uint16(r.cfg.Decred.TimeLockDelta),
   716  		SubscribeHtlcEvents:    s.htlcNotifier.SubscribeHtlcEvents,
   717  		InterceptableForwarder: s.interceptableSwitch,
   718  		SetChannelEnabled: func(outpoint wire.OutPoint) error {
   719  			return s.chanStatusMgr.RequestEnable(outpoint, true)
   720  		},
   721  		SetChannelDisabled: func(outpoint wire.OutPoint) error {
   722  			return s.chanStatusMgr.RequestDisable(outpoint, true)
   723  		},
   724  		SetChannelAuto: s.chanStatusMgr.RequestAuto,
   725  	}
   726  
   727  	genInvoiceFeatures := func() *lnwire.FeatureVector {
   728  		return s.featureMgr.Get(feature.SetInvoice)
   729  	}
   730  	genAmpInvoiceFeatures := func() *lnwire.FeatureVector {
   731  		return s.featureMgr.Get(feature.SetInvoiceAmp)
   732  	}
   733  
   734  	var (
   735  		subServers     []lnrpc.SubServer
   736  		subServerPerms []lnrpc.MacaroonPerms
   737  	)
   738  
   739  	// Before we create any of the sub-servers, we need to ensure that all
   740  	// the dependencies they need are properly populated within each sub
   741  	// server configuration struct.
   742  	//
   743  	// TODO(roasbeef): extend sub-server config to have both (local vs remote) DB
   744  	err = subServerCgs.PopulateDependencies(
   745  		r.cfg, s.cc, r.cfg.networkDir, macService, atpl, invoiceRegistry,
   746  		s.htlcSwitch, r.cfg.ActiveNetParams.Params, s.chanRouter,
   747  		routerBackend, s.nodeSigner, s.graphDB, s.chanStateDB,
   748  		s.sweeper, tower, s.towerClient, s.anchorTowerClient,
   749  		r.cfg.net.ResolveTCPAddr, genInvoiceFeatures,
   750  		genAmpInvoiceFeatures, rpcsLog,
   751  	)
   752  	if err != nil {
   753  		return err
   754  	}
   755  
   756  	// Now that the sub-servers have all their dependencies in place, we
   757  	// can create each sub-server!
   758  	for _, subServerInstance := range r.subGrpcHandlers {
   759  		subServer, macPerms, err := subServerInstance.CreateSubServer(
   760  			subServerCgs,
   761  		)
   762  		if err != nil {
   763  			return err
   764  		}
   765  
   766  		// We'll collect the sub-server, and also the set of
   767  		// permissions it needs for macaroons so we can apply the
   768  		// interceptors below.
   769  		subServers = append(subServers, subServer)
   770  		subServerPerms = append(subServerPerms, macPerms)
   771  	}
   772  
   773  	// Next, we need to merge the set of sub server macaroon permissions
   774  	// with the main RPC server permissions so we can unite them under a
   775  	// single set of interceptors.
   776  	for m, ops := range MainRPCServerPermissions() {
   777  		err := r.interceptorChain.AddPermission(m, ops)
   778  		if err != nil {
   779  			return err
   780  		}
   781  	}
   782  
   783  	for _, subServerPerm := range subServerPerms {
   784  		for method, ops := range subServerPerm {
   785  			err := r.interceptorChain.AddPermission(method, ops)
   786  			if err != nil {
   787  				return err
   788  			}
   789  		}
   790  	}
   791  
   792  	// External subservers possibly need to register their own permissions
   793  	// and macaroon validator.
   794  	for method, ops := range r.implCfg.ExternalValidator.Permissions() {
   795  		err := r.interceptorChain.AddPermission(method, ops)
   796  		if err != nil {
   797  			return err
   798  		}
   799  
   800  		// Give the external subservers the possibility to also use
   801  		// their own validator to check any macaroons attached to calls
   802  		// to this method.  This allows them to have their own root key
   803  		// ID database and permission entities.
   804  		err = macService.RegisterExternalValidator(
   805  			method, r.implCfg.ExternalValidator,
   806  		)
   807  		if err != nil {
   808  			return fmt.Errorf("could not register external "+
   809  				"macaroon validator: %v", err)
   810  		}
   811  	}
   812  
   813  	// Finally, with all the setup complete, add the last dependencies to
   814  	// the rpc server.
   815  	r.server = s
   816  	r.subServers = subServers
   817  	r.routerBackend = routerBackend
   818  	r.chanPredicate = chanPredicate
   819  	r.macService = macService
   820  	r.selfNode = selfNode.PubKeyBytes
   821  
   822  	graphCacheDuration := r.cfg.Caches.RPCGraphCacheDuration
   823  	if graphCacheDuration != 0 {
   824  		r.graphCacheEvictor = time.AfterFunc(graphCacheDuration, func() {
   825  			// Grab the mutex and purge the current populated
   826  			// describe graph response.
   827  			r.graphCache.Lock()
   828  			defer r.graphCache.Unlock()
   829  
   830  			r.describeGraphResp = nil
   831  
   832  			// Reset ourselves as well at the end so we run again
   833  			// after the duration.
   834  			r.graphCacheEvictor.Reset(graphCacheDuration)
   835  		})
   836  	}
   837  
   838  	return nil
   839  }
   840  
   841  // RegisterWithGrpcServer registers the rpcServer and any subservers with the
   842  // root gRPC server.
   843  func (r *rpcServer) RegisterWithGrpcServer(grpcServer *grpc.Server) error {
   844  	// Register the main RPC server.
   845  	lnrpc.RegisterLightningServer(grpcServer, r)
   846  
   847  	// Now that the main RPC server has been registered, we'll iterate through
   848  	// all the sub-RPC servers and register them to ensure that requests
   849  	// are properly routed towards them.
   850  	for _, subServer := range r.subGrpcHandlers {
   851  		err := subServer.RegisterWithRootServer(grpcServer)
   852  		if err != nil {
   853  			return fmt.Errorf("unable to register "+
   854  				"sub-server with root: %v", err)
   855  		}
   856  	}
   857  
   858  	// Before actually listening on the gRPC listener, give external
   859  	// subservers the chance to register to our gRPC server. Those external
   860  	// subservers (think GrUB) are responsible for starting/stopping on
   861  	// their own, we just let them register their services to the same
   862  	// server instance so all of them can be exposed on the same
   863  	// port/listener.
   864  	err := r.implCfg.RegisterGrpcSubserver(grpcServer)
   865  	if err != nil {
   866  		rpcsLog.Errorf("error registering external gRPC "+
   867  			"subserver: %v", err)
   868  	}
   869  
   870  	return nil
   871  }
   872  
   873  // Start launches any helper goroutines required for the rpcServer to function.
   874  func (r *rpcServer) Start() error {
   875  	if atomic.AddInt32(&r.started, 1) != 1 {
   876  		return nil
   877  	}
   878  
   879  	// First, we'll start all the sub-servers to ensure that they're ready
   880  	// to take new requests in.
   881  	//
   882  	// TODO(roasbeef): some may require that the entire daemon be started
   883  	// at that point
   884  	for _, subServer := range r.subServers {
   885  		rpcsLog.Debugf("Starting sub RPC server: %v", subServer.Name())
   886  
   887  		if err := subServer.Start(); err != nil {
   888  			return err
   889  		}
   890  	}
   891  
   892  	return nil
   893  }
   894  
   895  // RegisterWithRestProxy registers the RPC server and any subservers with the
   896  // given REST proxy.
   897  func (r *rpcServer) RegisterWithRestProxy(restCtx context.Context,
   898  	restMux *proxy.ServeMux, restDialOpts []grpc.DialOption,
   899  	restProxyDest string) error {
   900  
   901  	// With our custom REST proxy mux created, register our main RPC and
   902  	// give all subservers a chance to register as well.
   903  	err := lnrpc.RegisterLightningHandlerFromEndpoint(
   904  		restCtx, restMux, restProxyDest, restDialOpts,
   905  	)
   906  	if err != nil {
   907  		return err
   908  	}
   909  
   910  	for _, subServer := range r.subGrpcHandlers {
   911  		err := subServer.RegisterWithRestServer(
   912  			restCtx, restMux, restProxyDest, restDialOpts,
   913  		)
   914  		if err != nil {
   915  			return fmt.Errorf("unable to register REST sub-server "+
   916  				"with root: %v", err)
   917  		}
   918  	}
   919  
   920  	// Before listening on any of the interfaces, we also want to give the
   921  	// external subservers a chance to register their own REST proxy stub
   922  	// with our mux instance.
   923  	err = r.implCfg.RegisterRestSubserver(
   924  		restCtx, restMux, restProxyDest, restDialOpts,
   925  	)
   926  	if err != nil {
   927  		rpcsLog.Errorf("error registering external REST subserver: %v",
   928  			err)
   929  	}
   930  	return nil
   931  }
   932  
   933  // Stop signals any active goroutines for a graceful closure.
   934  func (r *rpcServer) Stop() error {
   935  	if atomic.AddInt32(&r.shutdown, 1) != 1 {
   936  		return nil
   937  	}
   938  
   939  	rpcsLog.Infof("Stopping RPC Server")
   940  
   941  	close(r.quit)
   942  
   943  	// After we've signalled all of our active goroutines to exit, we'll
   944  	// then do the same to signal a graceful shutdown of all the sub
   945  	// servers.
   946  	for _, subServer := range r.subServers {
   947  		rpcsLog.Infof("Stopping %v Sub-RPC Server",
   948  			subServer.Name())
   949  
   950  		if err := subServer.Stop(); err != nil {
   951  			rpcsLog.Errorf("unable to stop sub-server %v: %v",
   952  				subServer.Name(), err)
   953  			continue
   954  		}
   955  	}
   956  
   957  	return nil
   958  }
   959  
   960  // addrPairsToOutputs converts a map describing a set of outputs to be created
   961  // to the outputs themselves. The passed map pairs an address with a desired
   962  // output value amount. Each address is converted to its corresponding pkScript
   963  // to be used within the constructed output(s).
   964  func addrPairsToOutputs(addrPairs map[string]int64,
   965  	netParams *chaincfg.Params) ([]*wire.TxOut, error) {
   966  
   967  	outputs := make([]*wire.TxOut, 0, len(addrPairs))
   968  	for addr, amt := range addrPairs {
   969  		addr, err := stdaddr.DecodeAddress(addr, netParams)
   970  		if err != nil {
   971  			return nil, err
   972  		}
   973  
   974  		pkscript, err := input.PayToAddrScript(addr)
   975  		if err != nil {
   976  			return nil, err
   977  		}
   978  
   979  		outputs = append(outputs, wire.NewTxOut(amt, pkscript))
   980  	}
   981  
   982  	return outputs, nil
   983  }
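
        // exampleAddrPairs is an illustrative sketch (not part of the original
        // file) of the address-to-amount map consumed by addrPairsToOutputs (and,
        // through SendMany, by sendCoinsOnChain): keys are encoded addresses for the
        // active network and values are output amounts in atoms. The function name
        // and addresses are placeholders, so decoding them would fail at runtime.
        func exampleAddrPairs(netParams *chaincfg.Params) ([]*wire.TxOut, error) {
        	pairs := map[string]int64{
        		"DsPlaceholderAddress1": 1_000_000, // 0.01 DCR in atoms
        		"DsPlaceholderAddress2": 2_500_000, // 0.025 DCR in atoms
        	}
        	return addrPairsToOutputs(pairs, netParams)
        }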
   984  
   985  // allowCORS wraps the given http.Handler with a function that adds the
   986  // Access-Control-Allow-Origin header to the response.
   987  func allowCORS(handler http.Handler, origins []string) http.Handler {
   988  	allowHeaders := "Access-Control-Allow-Headers"
   989  	allowMethods := "Access-Control-Allow-Methods"
   990  	allowOrigin := "Access-Control-Allow-Origin"
   991  
   992  	// If the user didn't supply any origins that means CORS is disabled
   993  	// and we should return the original handler.
   994  	if len(origins) == 0 {
   995  		return handler
   996  	}
   997  
   998  	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
   999  		origin := r.Header.Get("Origin")
  1000  
  1001  		// Skip everything if the browser doesn't send the Origin field.
  1002  		if origin == "" {
  1003  			handler.ServeHTTP(w, r)
  1004  			return
  1005  		}
  1006  
  1007  		// Set the static header fields first.
  1008  		w.Header().Set(
  1009  			allowHeaders,
  1010  			"Content-Type, Accept, Grpc-Metadata-Macaroon",
  1011  		)
  1012  		w.Header().Set(allowMethods, "GET, POST, DELETE")
  1013  
  1014  		// Either we allow all origins or the incoming request matches
  1015  		// a specific origin in our list of allowed origins.
  1016  		for _, allowedOrigin := range origins {
  1017  			if allowedOrigin == "*" || origin == allowedOrigin {
  1018  				// Only set allowed origin to requested origin.
  1019  				w.Header().Set(allowOrigin, origin)
  1020  
  1021  				break
  1022  			}
  1023  		}
  1024  
  1025  		// For a pre-flight request we only need to send the headers
  1026  		// back. No need to call the rest of the chain.
  1027  		if r.Method == "OPTIONS" {
  1028  			return
  1029  		}
  1030  
  1031  		// Everything's prepared now, we can pass the request along the
  1032  		// chain of handlers.
  1033  		handler.ServeHTTP(w, r)
  1034  	})
  1035  }
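
        // exampleCORSWrapping is an illustrative sketch (not part of the original
        // file) of how allowCORS is intended to wrap the REST handler: an empty
        // origin list returns the handler untouched (CORS disabled), while a
        // non-empty list causes matching Origin headers to be echoed back. The
        // function name and origin value are hypothetical.
        func exampleCORSWrapping(restHandler http.Handler) http.Handler {
        	return allowCORS(restHandler, []string{"https://wallet.example.org"})
        }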
  1036  
  1037  // sendCoinsOnChain makes an on-chain transaction to send coins to one or
  1038  // more addresses specified in the passed payment map. The payment map maps an
  1039  // address to a specified output value to be sent to that address.
  1040  func (r *rpcServer) sendCoinsOnChain(paymentMap map[string]int64,
  1041  	feeRate chainfee.AtomPerKByte, minConfs int32,
  1042  	label, fromAccount string) (*chainhash.Hash, error) {
  1043  
  1044  	outputs, err := addrPairsToOutputs(paymentMap, r.cfg.ActiveNetParams.Params)
  1045  	if err != nil {
  1046  		return nil, err
  1047  	}
  1048  
  1049  	// Note(decred): This section is commented out because the wallet does not
  1050  	// implement CreateSimpleTx. Anchor commitments are disabled in mainnet,
  1051  	// therefore this isn't critical.
  1052  
  1053  	// We first do a dry run, to sanity check we won't spend our wallet
  1054  	// balance below the reserved amount.
  1055  	// 	authoredTx, err := r.server.cc.Wallet.CreateSimpleTx(
  1056  	// 		outputs, feeRate, minConfs, true,
  1057  	// 	)
  1058  	// 	if err != nil {
  1059  	// 		return nil, err
  1060  	// 	}
  1061  	//
  1062  	// 	_, err = r.server.cc.Wallet.CheckReservedValueTx(
  1063  	// 		lnwallet.CheckReservedValueTxReq{
  1064  	//			Tx: authoredTx.Tx,
  1065  	//			ChangeIndex: &authoredTx.ChangeIndex,
  1066  	//		},
  1067  	//	)
  1068  	// 	if err != nil {
  1069  	// 		return nil, err
  1070  	// 	}
  1071  
  1072  	// If that checks out, we're fairly confident that sending to
  1073  	// these outputs will keep the wallet balance above the reserve.
  1074  	tx, err := r.server.cc.Wallet.SendOutputs(
  1075  		outputs, feeRate, minConfs, label, fromAccount,
  1076  	)
  1077  	if err != nil {
  1078  		return nil, err
  1079  	}
  1080  
  1081  	txHash := tx.TxHash()
  1082  	return &txHash, nil
  1083  }
  1084  
  1085  // ListUnspent returns useful information about each unspent output owned by
  1086  // the wallet, as reported by the underlying `ListUnspentWitness`; the
  1087  // information returned is: outpoint, amount in atoms, address, address
  1088  // type, scriptPubKey in hex and number of confirmations.  The result is
  1089  // filtered to contain outputs whose number of confirmations is between a
  1090  // minimum and maximum number of confirmations specified by the user, with 0
  1091  // meaning unconfirmed.
  1092  func (r *rpcServer) ListUnspent(ctx context.Context,
  1093  	in *lnrpc.ListUnspentRequest) (*lnrpc.ListUnspentResponse, error) {
  1094  
  1095  	// Validate the confirmation arguments.
  1096  	minConfs, maxConfs, err := lnrpc.ParseConfs(in.MinConfs, in.MaxConfs)
  1097  	if err != nil {
  1098  		return nil, err
  1099  	}
  1100  
  1101  	// With our arguments validated, we'll query the internal wallet for
  1102  	// the set of UTXOs that match our query.
  1103  	//
  1104  	// We'll acquire the global coin selection lock to ensure there aren't
  1105  	// any other concurrent processes attempting to lock any UTXOs which may
  1106  	// be shown available to us.
  1107  	var utxos []*lnwallet.Utxo
  1108  	err = r.server.cc.Wallet.WithCoinSelectLock(func() error {
  1109  		utxos, err = r.server.cc.Wallet.ListUnspentWitness(
  1110  			minConfs, maxConfs, in.Account,
  1111  		)
  1112  		return err
  1113  	})
  1114  	if err != nil {
  1115  		return nil, err
  1116  	}
  1117  
  1118  	rpcUtxos, err := lnrpc.MarshalUtxos(utxos, r.cfg.ActiveNetParams.Params)
  1119  	if err != nil {
  1120  		return nil, err
  1121  	}
  1122  
  1123  	maxStr := ""
  1124  	if maxConfs != math.MaxInt32 {
  1125  		maxStr = " max=" + fmt.Sprintf("%d", maxConfs)
  1126  	}
  1127  
  1128  	rpcsLog.Debugf("[listunspent] min=%v%v, generated utxos: %v", minConfs,
  1129  		maxStr, utxos)
  1130  
  1131  	return &lnrpc.ListUnspentResponse{
  1132  		Utxos: rpcUtxos,
  1133  	}, nil
  1134  }
  1135  
  1136  // EstimateFee handles a request for estimating the fee for sending a
  1137  // transaction spending to multiple specified outputs in parallel.
  1138  func (r *rpcServer) EstimateFee(ctx context.Context,
  1139  	in *lnrpc.EstimateFeeRequest) (*lnrpc.EstimateFeeResponse, error) {
  1140  
  1141  	// Create the list of outputs we are spending to.
  1142  	outputs, err := addrPairsToOutputs(in.AddrToAmount, r.cfg.ActiveNetParams.Params)
  1143  	if err != nil {
  1144  		return nil, err
  1145  	}
  1146  
  1147  	// Query the fee estimator for the fee rate for the given confirmation
  1148  	// target.
  1149  	target := in.TargetConf
  1150  	feePerKB, err := sweep.DetermineFeePerKB(
  1151  		r.server.cc.FeeEstimator, sweep.FeePreference{
  1152  			ConfTarget: uint32(target),
  1153  		},
  1154  	)
  1155  	if err != nil {
  1156  		return nil, err
  1157  	}
  1158  
  1159  	// Then, we'll extract the minimum number of confirmations that each
  1160  	// output we use to fund the transaction should satisfy.
  1161  	minConfs, err := lnrpc.ExtractMinConfs(
  1162  		in.GetMinConfs(), in.GetSpendUnconfirmed(),
  1163  	)
  1164  	if err != nil {
  1165  		return nil, err
  1166  	}
  1167  
  1168  	// We will ask the wallet to create a tx using this fee rate. We set
  1169  	// dryRun=true to avoid inflating the change addresses in the db.
  1170  	var tx *txauthor.AuthoredTx
  1171  	wallet := r.server.cc.Wallet
  1172  	err = wallet.WithCoinSelectLock(func() error {
  1173  		tx, err = wallet.CreateSimpleTx(outputs, feePerKB, minConfs, true)
  1174  		return err
  1175  	})
  1176  	if err != nil {
  1177  		return nil, err
  1178  	}
  1179  
  1180  	// Use the created tx to calculate the total fee.
  1181  	totalOutput := int64(0)
  1182  	for _, out := range tx.Tx.TxOut {
  1183  		totalOutput += out.Value
  1184  	}
  1185  	totalFee := int64(tx.TotalInput) - totalOutput
  1186  
  1187  	resp := &lnrpc.EstimateFeeResponse{
  1188  		FeeAtoms:            totalFee,
  1189  		FeerateAtomsPerByte: int64(feePerKB / 1000),
  1190  	}
  1191  
  1192  	rpcsLog.Debugf("[estimatefee] fee estimate for conf target %d: %v",
  1193  		target, resp)
  1194  
  1195  	return resp, nil
  1196  }
  1197  
  1198  // SendCoins executes a request to send coins to a particular address. Unlike
  1199  // SendMany, this RPC call only allows creating a single output at a time.
  1200  func (r *rpcServer) SendCoins(ctx context.Context,
  1201  	in *lnrpc.SendCoinsRequest) (*lnrpc.SendCoinsResponse, error) {
  1202  
  1203  	// Calculate an appropriate fee rate for this transaction.
  1204  	feePerKB, err := calculateFeeRate(
  1205  		uint64(in.AtomsPerByte),
  1206  		uint32(in.TargetConf), r.server.cc.FeeEstimator,
  1207  	)
  1208  	if err != nil {
  1209  		return nil, err
  1210  	}
  1211  
  1212  	// Then, we'll extract the minimum number of confirmations that each
  1213  	// output we use to fund the transaction should satisfy.
  1214  	minConfs, err := lnrpc.ExtractMinConfs(in.MinConfs, in.SpendUnconfirmed)
  1215  	if err != nil {
  1216  		return nil, err
  1217  	}
  1218  
  1219  	rpcsLog.Infof("[sendcoins] addr=%v, amt=%v, atom/kb=%v, min_confs=%v, "+
  1220  		"send_all=%v",
  1221  		in.Addr, dcrutil.Amount(in.Amount), int64(feePerKB), minConfs,
  1222  		in.SendAll)
  1223  
  1224  	// Decode the address receiving the coins; we need to check whether the
  1225  	// address is valid for this network.
  1226  	targetAddr, err := stdaddr.DecodeAddress(
  1227  		in.Addr, r.cfg.ActiveNetParams.Params,
  1228  	)
  1229  	if err != nil {
  1230  		return nil, err
  1231  	}
  1232  
  1233  	// If the destination address parses to a valid pubkey, we assume the
  1234  	// user accidentally tried to send funds to a bare pubkey address. This
  1235  	// check is here to prevent unintended transfers.
  1236  	decodedAddr, _ := hex.DecodeString(in.Addr)
  1237  	_, err = secp256k1.ParsePubKey(decodedAddr)
  1238  	if err == nil {
  1239  		return nil, fmt.Errorf("cannot send coins to pubkeys")
  1240  	}
  1241  
  1242  	label, err := labels.ValidateAPI(in.Label)
  1243  	if err != nil {
  1244  		return nil, err
  1245  	}
  1246  
  1247  	var txid *chainhash.Hash
  1248  
  1249  	wallet := r.server.cc.Wallet
  1250  
  1251  	// If the send all flag is active, then we'll attempt to sweep all the
  1252  	// coins in the wallet in a single transaction (if possible),
  1253  	// otherwise, we'll respect the amount, and attempt a regular 2-output
  1254  	// send.
  1255  	if in.SendAll {
  1256  		// At this point, the amount shouldn't be set since we've been
  1257  		// instructed to sweep all the coins from the wallet.
  1258  		if in.Amount != 0 {
  1259  			return nil, fmt.Errorf("amount set while SendAll is " +
  1260  				"active")
  1261  		}
  1262  
  1263  		_, bestHeight, err := r.server.cc.ChainIO.GetBestBlock()
  1264  		if err != nil {
  1265  			return nil, err
  1266  		}
  1267  
  1268  		// With the sweeper instance created, we can now generate a
  1269  		// transaction that will sweep ALL outputs from the wallet in a
  1270  		// single transaction. This will be generated in a concurrent
  1271  		// safe manner, so no need to worry about locking. The tx will
  1272  		// pay to the change address created above if we needed to
  1273  		// reserve any value, the rest will go to targetAddr.
  1274  		sweepTxPkg, err := sweep.CraftSweepAllTx(
  1275  			feePerKB, uint32(bestHeight), nil, targetAddr, wallet,
  1276  			wallet, wallet.WalletController,
  1277  			r.server.cc.FeeEstimator, r.server.cc.Signer,
  1278  			r.cfg.ActiveNetParams.Params,
  1279  			minConfs,
  1280  		)
  1281  		if err != nil {
  1282  			return nil, err
  1283  		}
  1284  
  1285  		// Before we publish the transaction we make sure it won't
  1286  		// violate our reserved wallet value.
  1287  		var reservedVal dcrutil.Amount
  1288  		err = wallet.WithCoinSelectLock(func() error {
  1289  			var err error
  1290  			reservedVal, err = wallet.CheckReservedValueTx(
  1291  				lnwallet.CheckReservedValueTxReq{
  1292  					Tx: sweepTxPkg.SweepTx,
  1293  				},
  1294  			)
  1295  			return err
  1296  		})
  1297  
  1298  		// If sending everything to this address would invalidate our
  1299  		// reserved wallet balance, we create a new sweep tx, where
  1300  		// we'll send the reserved value back to our wallet.
  1301  		if err == lnwallet.ErrReservedValueInvalidated {
  1302  			sweepTxPkg.CancelSweepAttempt()
  1303  
  1304  			rpcsLog.Debugf("Reserved value %v not satisfied after "+
  1305  				"send_all, trying with change output",
  1306  				reservedVal)
  1307  
  1308  			// We'll request a change address from the wallet,
  1309  			// where we'll send this reserved value back to. This
  1310  			// ensures this is an address the wallet knows about,
  1311  			// allowing us to pass the reserved value check.
  1312  			changeAddr, err := r.server.cc.Wallet.NewAddress(
  1313  				lnwallet.PubKeyHash, true, lnwallet.DefaultAccountName,
  1314  			)
  1315  			if err != nil {
  1316  				return nil, err
  1317  			}
  1318  
  1319  			// Send the reserved value to this change address, the
  1320  			// remaining funds will go to the targetAddr.
  1321  			outputs := []sweep.DeliveryAddr{
  1322  				{
  1323  					Addr: changeAddr,
  1324  					Amt:  reservedVal,
  1325  				},
  1326  			}
  1327  
  1328  			sweepTxPkg, err = sweep.CraftSweepAllTx(
  1329  				feePerKB, uint32(bestHeight), outputs,
  1330  				targetAddr, wallet, wallet,
  1331  				wallet.WalletController,
  1332  				r.server.cc.FeeEstimator, r.server.cc.Signer,
  1333  				r.cfg.ActiveNetParams.Params,
  1334  				minConfs,
  1335  			)
  1336  			if err != nil {
  1337  				return nil, err
  1338  			}
  1339  
  1340  			// Sanity check the new tx by re-doing the check.
  1341  			err = wallet.WithCoinSelectLock(func() error {
  1342  				_, err := wallet.CheckReservedValueTx(
  1343  					lnwallet.CheckReservedValueTxReq{
  1344  						Tx: sweepTxPkg.SweepTx,
  1345  					},
  1346  				)
  1347  				return err
  1348  			})
  1349  			if err != nil {
  1350  				sweepTxPkg.CancelSweepAttempt()
  1351  
  1352  				return nil, err
  1353  			}
  1354  		} else if err != nil {
  1355  			sweepTxPkg.CancelSweepAttempt()
  1356  
  1357  			return nil, err
  1358  		}
  1359  
  1360  		rpcsLog.Debugf("Sweeping all coins from wallet to addr=%v, "+
  1361  			"with tx=%v", in.Addr, spew.Sdump(sweepTxPkg.SweepTx))
  1362  
  1363  		// As our sweep transaction was created successfully, we'll
  1364  		// now attempt to publish it, cancelling the sweep pkg to
  1365  		// return all outputs if it fails.
  1366  		err = wallet.PublishTransaction(sweepTxPkg.SweepTx, label)
  1367  		if err != nil {
  1368  			sweepTxPkg.CancelSweepAttempt()
  1369  
  1370  			return nil, fmt.Errorf("unable to broadcast sweep "+
  1371  				"transaction: %v", err)
  1372  		}
  1373  
  1374  		sweepTXID := sweepTxPkg.SweepTx.TxHash()
  1375  		txid = &sweepTXID
  1376  	} else {
  1377  
  1378  		// We'll now construct our payment map, and use the wallet's
  1379  		// coin selection synchronization method to ensure that no coin
  1380  		// selection (funding, sweep alls, other sends) can proceed
  1381  		// while we instruct the wallet to send this transaction.
  1382  		paymentMap := map[string]int64{targetAddr.String(): in.Amount}
  1383  		err := wallet.WithCoinSelectLock(func() error {
  1384  			newTXID, err := r.sendCoinsOnChain(
  1385  				paymentMap, feePerKB, minConfs, label, in.Account,
  1386  			)
  1387  			if err != nil {
  1388  				return err
  1389  			}
  1390  
  1391  			txid = newTXID
  1392  
  1393  			return nil
  1394  		})
  1395  		if err != nil {
  1396  			return nil, err
  1397  		}
  1398  	}
  1399  
  1400  	rpcsLog.Infof("[sendcoins] spend generated txid: %v", txid.String())
  1401  
  1402  	return &lnrpc.SendCoinsResponse{Txid: txid.String()}, nil
  1403  }
  1404  
  1405  // SendMany handles a request for a transaction that creates multiple specified
  1406  // outputs in parallel.
  1407  func (r *rpcServer) SendMany(ctx context.Context,
  1408  	in *lnrpc.SendManyRequest) (*lnrpc.SendManyResponse, error) {
  1409  
  1410  	// Calculate an appropriate fee rate for this transaction.
  1411  	feePerKB, err := calculateFeeRate(
  1412  		uint64(in.AtomsPerByte),
  1413  		uint32(in.TargetConf), r.server.cc.FeeEstimator,
  1414  	)
  1415  	if err != nil {
  1416  		return nil, err
  1417  	}
  1418  
  1419  	// Then, we'll extract the minimum number of confirmations that each
  1420  	// output we use to fund the transaction should satisfy.
  1421  	minConfs, err := lnrpc.ExtractMinConfs(in.MinConfs, in.SpendUnconfirmed)
  1422  	if err != nil {
  1423  		return nil, err
  1424  	}
  1425  
  1426  	label, err := labels.ValidateAPI(in.Label)
  1427  	if err != nil {
  1428  		return nil, err
  1429  	}
  1430  
  1431  	rpcsLog.Infof("[sendmany] outputs=%v, atom/kB=%v",
  1432  		spew.Sdump(in.AddrToAmount), int64(feePerKB))
  1433  
  1434  	var txid *chainhash.Hash
  1435  
  1436  	// We'll attempt to send to the target set of outputs, ensuring that we
  1437  	// synchronize with any other ongoing coin selection attempts which
  1438  	// happen to also be concurrently executing.
  1439  	wallet := r.server.cc.Wallet
  1440  	err = wallet.WithCoinSelectLock(func() error {
  1441  
  1442  		sendManyTXID, err := r.sendCoinsOnChain(
  1443  			in.AddrToAmount, feePerKB, minConfs, label, in.Account,
  1444  		)
  1445  		if err != nil {
  1446  			return err
  1447  		}
  1448  
  1449  		txid = sendManyTXID
  1450  
  1451  		return nil
  1452  	})
  1453  	if err != nil {
  1454  		return nil, err
  1455  	}
  1456  
  1457  	rpcsLog.Infof("[sendmany] spend generated txid: %v", txid.String())
  1458  
  1459  	return &lnrpc.SendManyResponse{Txid: txid.String()}, nil
  1460  }
  1461  
  1462  // NewAddress creates a new address under control of the local wallet.
  1463  func (r *rpcServer) NewAddress(ctx context.Context,
  1464  	in *lnrpc.NewAddressRequest) (*lnrpc.NewAddressResponse, error) {
  1465  
  1466  	// Always use the default wallet account unless one was specified.
  1467  	account := lnwallet.DefaultAccountName
  1468  	if in.Account != "" {
  1469  		account = in.Account
  1470  	}
  1471  
  1472  	// Translate the gRPC proto address type to the wallet controller's
  1473  	// available address types.
  1474  	var (
  1475  		addr stdaddr.Address
  1476  		err  error
  1477  	)
  1478  	switch in.Type {
  1479  	case lnrpc.AddressType_PUBKEY_HASH:
  1480  		addr, err = r.server.cc.Wallet.NewAddress(
  1481  			lnwallet.PubKeyHash, false, account,
  1482  		)
  1483  		if err != nil {
  1484  			return nil, err
  1485  		}
  1486  	case lnrpc.AddressType_UNUSED_PUBKEY_HASH:
  1487  		addr, err = r.server.cc.Wallet.LastUnusedAddress(
  1488  			lnwallet.PubKeyHash, account,
  1489  		)
  1490  		if err != nil {
  1491  			return nil, err
  1492  		}
  1493  
  1494  	default:
  1495  		return nil, fmt.Errorf("unsupported address type %s", in.Type)
  1496  	}
  1497  
  1498  	rpcsLog.Debugf("[newaddress] account=%v type=%v addr=%v", account,
  1499  		in.Type, addr.String())
  1500  	return &lnrpc.NewAddressResponse{Address: addr.String()}, nil
  1501  }
  1502  
  1503  var (
  1504  	// signedMsgPrefix is a special prefix that we'll prepend to any
  1505  	// messages we sign/verify. We do this to ensure that we don't
  1506  	// accidentally sign a sighash, or other sensitive material. By
  1507  // prepending this fragment, we bind message signing to our particular
  1508  	// context.
  1509  	signedMsgPrefix = []byte("Lightning Signed Message:")
  1510  )
  1511  
  1512  // SignMessage signs a message with the resident node's private key. The
  1513  // returned signature string is zbase32 encoded and pubkey recoverable, meaning
  1514  // that only the message digest and signature are needed for verification.
  1515  func (r *rpcServer) SignMessage(_ context.Context,
  1516  	in *lnrpc.SignMessageRequest) (*lnrpc.SignMessageResponse, error) {
  1517  
  1518  	if in.Msg == nil {
  1519  		return nil, fmt.Errorf("need a message to sign")
  1520  	}
  1521  
  1522  	in.Msg = append(signedMsgPrefix, in.Msg...)
  1523  	sigBytes, err := r.server.nodeSigner.SignMessageCompact(
  1524  		in.Msg, !in.SingleHash,
  1525  	)
  1526  	if err != nil {
  1527  		return nil, err
  1528  	}
  1529  
  1530  	sig := zbase32.EncodeToString(sigBytes)
  1531  	return &lnrpc.SignMessageResponse{Signature: sig}, nil
  1532  }
  1533  
  1534  // VerifyMessage verifies a signature over a msg. The signature must be zbase32
  1535  // encoded and signed by an active node in the resident node's channel
  1536  // database. In addition to returning the validity of the signature,
  1537  // VerifyMessage also returns the recovered pubkey from the signature.
  1538  func (r *rpcServer) VerifyMessage(ctx context.Context,
  1539  	in *lnrpc.VerifyMessageRequest) (*lnrpc.VerifyMessageResponse, error) {
  1540  
  1541  	if in.Msg == nil {
  1542  		return nil, fmt.Errorf("need a message to verify")
  1543  	}
  1544  
  1545  	// The signature should be zbase32 encoded
  1546  	sig, err := zbase32.DecodeString(in.Signature)
  1547  	if err != nil {
  1548  		return nil, fmt.Errorf("failed to decode signature: %v", err)
  1549  	}
  1550  
  1551  	// The signature is over the chainhash.HashB digest of the prefixed message.
  1552  	in.Msg = append(signedMsgPrefix, in.Msg...)
  1553  	digest := chainhash.HashB(in.Msg)
  1554  
  1555  	// RecoverCompact both recovers the pubkey and validates the signature.
  1556  	pubKey, _, err := ecdsa.RecoverCompact(sig, digest)
  1557  	if err != nil {
  1558  		return &lnrpc.VerifyMessageResponse{Valid: false}, nil
  1559  	}
  1560  	pubKeyHex := hex.EncodeToString(pubKey.SerializeCompressed())
  1561  
  1562  	var pub [33]byte
  1563  	copy(pub[:], pubKey.SerializeCompressed())
  1564  
  1565  	// Query the channel graph to ensure a node in the network with active
  1566  	// channels signed the message.
  1567  	//
  1568  	// TODO(phlip9): Require valid nodes to have capital in active channels.
  1569  	graph := r.server.graphDB
  1570  	_, active, err := graph.HasLightningNode(pub)
  1571  	if err != nil {
  1572  		return nil, fmt.Errorf("failed to query graph: %v", err)
  1573  	}
  1574  
  1575  	return &lnrpc.VerifyMessageResponse{
  1576  		Valid:  active,
  1577  		Pubkey: pubKeyHex,
  1578  	}, nil
  1579  }
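
        // exampleSignAndVerify is an illustrative sketch (not part of the original
        // file) of the round trip between SignMessage and VerifyMessage: the node
        // signs the prefixed message with a compact, pubkey-recoverable signature
        // encoded as zbase32, and VerifyMessage recovers the pubkey and checks that
        // it belongs to a known node in the graph. The function name is
        // hypothetical.
        func exampleSignAndVerify(ctx context.Context, r *rpcServer,
        	msg []byte) (bool, error) {

        	signResp, err := r.SignMessage(ctx, &lnrpc.SignMessageRequest{Msg: msg})
        	if err != nil {
        		return false, err
        	}

        	verifyResp, err := r.VerifyMessage(ctx, &lnrpc.VerifyMessageRequest{
        		Msg:       msg,
        		Signature: signResp.Signature,
        	})
        	if err != nil {
        		return false, err
        	}

        	return verifyResp.Valid, nil
        }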
  1580  
  1581  // ConnectPeer attempts to establish a connection to a remote peer.
  1582  func (r *rpcServer) ConnectPeer(ctx context.Context,
  1583  	in *lnrpc.ConnectPeerRequest) (*lnrpc.ConnectPeerResponse, error) {
  1584  
  1585  	// If the server hasn't yet started, it won't be able to service any of
  1586  	// our requests, so we'll bail out early here.
  1587  	if !r.server.Started() {
  1588  		return nil, ErrServerNotActive
  1589  	}
  1590  
  1591  	if in.Addr == nil {
  1592  		return nil, fmt.Errorf("need: lnc pubkeyhash@hostname")
  1593  	}
  1594  
  1595  	pubkeyHex, err := hex.DecodeString(in.Addr.Pubkey)
  1596  	if err != nil {
  1597  		return nil, err
  1598  	}
  1599  	pubKey, err := secp256k1.ParsePubKey(pubkeyHex)
  1600  	if err != nil {
  1601  		return nil, err
  1602  	}
  1603  
  1604  	// Connections to ourselves are disallowed for obvious reasons.
  1605  	if pubKey.IsEqual(r.server.identityECDH.PubKey()) {
  1606  		return nil, fmt.Errorf("cannot make connection to self")
  1607  	}
  1608  
  1609  	addr, err := parseAddr(in.Addr.Host, r.cfg.net)
  1610  	if err != nil {
  1611  		return nil, err
  1612  	}
  1613  
  1614  	peerAddr := &lnwire.NetAddress{
  1615  		IdentityKey: pubKey,
  1616  		Address:     addr,
  1617  		ChainNet:    r.cfg.ActiveNetParams.Net,
  1618  	}
  1619  
  1620  	rpcsLog.Debugf("[connectpeer] requested connection to %x@%s",
  1621  		peerAddr.IdentityKey.SerializeCompressed(), peerAddr.Address)
  1622  
  1623  	// By default, we will use the global connection timeout value.
  1624  	timeout := r.cfg.ConnectionTimeout
  1625  
  1626  	// Check if the connection timeout is set. If set, we will use it in our
  1627  	// request.
  1628  	if in.Timeout != 0 {
  1629  		timeout = time.Duration(in.Timeout) * time.Second
  1630  		rpcsLog.Debugf(
  1631  			"[connectpeer] connection timeout is set to %v",
  1632  			timeout,
  1633  		)
  1634  	}
  1635  
  1636  	if err := r.server.ConnectToPeer(peerAddr,
  1637  		in.Perm, timeout); err != nil {
  1638  
  1639  		rpcsLog.Errorf(
  1640  			"[connectpeer]: error connecting to peer: %v", err,
  1641  		)
  1642  		return nil, err
  1643  	}
  1644  
  1645  	rpcsLog.Debugf("Connected to peer: %v", peerAddr.String())
  1646  	return &lnrpc.ConnectPeerResponse{}, nil
  1647  }
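
        // A hypothetical gRPC request for this endpoint (field names as used
        // above; the host may include an explicit port, Perm requests a
        // persistent connection and Timeout, in seconds, overrides the global
        // connection timeout):
        //
        //	req := &lnrpc.ConnectPeerRequest{
        //		Addr: &lnrpc.LightningAddress{
        //			Pubkey: "03abcd...", // hex-encoded compressed identity key
        //			Host:   "203.0.113.1:9735",
        //		},
        //		Perm:    true,
        //		Timeout: 30,
        //	}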
  1648  
  1649  // DisconnectPeer attempts to disconnect one peer from another identified by a
  1650  // given pubKey. In the case that we currently have a pending or active channel
  1651  // with the target peer, this action will be disallowed.
  1652  func (r *rpcServer) DisconnectPeer(ctx context.Context,
  1653  	in *lnrpc.DisconnectPeerRequest) (*lnrpc.DisconnectPeerResponse, error) {
  1654  
  1655  	rpcsLog.Debugf("[disconnectpeer] from peer(%s)", in.PubKey)
  1656  
  1657  	if !r.server.Started() {
  1658  		return nil, ErrServerNotActive
  1659  	}
  1660  
  1661  	// First, we'll validate the string passed in the request to
  1662  	// ensure that it's a valid hex-string, and also a valid compressed
  1663  	// public key.
  1664  	pubKeyBytes, err := hex.DecodeString(in.PubKey)
  1665  	if err != nil {
  1666  		return nil, fmt.Errorf("unable to decode pubkey bytes: %v", err)
  1667  	}
  1668  	peerPubKey, err := secp256k1.ParsePubKey(pubKeyBytes)
  1669  	if err != nil {
  1670  		return nil, fmt.Errorf("unable to parse pubkey: %v", err)
  1671  	}
  1672  
  1673  	// Next, we'll fetch the pending/active channels we have with a
  1674  	// particular peer.
  1675  	nodeChannels, err := r.server.chanStateDB.FetchOpenChannels(peerPubKey)
  1676  	if err != nil {
  1677  		return nil, fmt.Errorf("unable to fetch channels for peer: %v", err)
  1678  	}
  1679  
  1680  	// In order to avoid erroneously disconnecting from a peer that we have
  1681  	// an active channel with, if we have any channels active with this
  1682  	// peer, then we'll disallow disconnecting from them.
  1683  	if len(nodeChannels) > 0 && !r.cfg.UnsafeDisconnect {
  1684  		return nil, fmt.Errorf("cannot disconnect from peer(%x), "+
  1685  			"all active channels with the peer need to be closed "+
  1686  			"first", pubKeyBytes)
  1687  	}
  1688  
  1689  	// With all initial validation complete, we'll now request that the
  1690  	// server disconnects from the peer.
  1691  	if err := r.server.DisconnectPeer(peerPubKey); err != nil {
  1692  		return nil, fmt.Errorf("unable to disconnect peer: %v", err)
  1693  	}
  1694  
  1695  	return &lnrpc.DisconnectPeerResponse{}, nil
  1696  }
  1697  
  1698  // newFundingShimAssembler returns a new fully populated
  1699  // chanfunding.CannedAssembler using a FundingShim obtained from an RPC caller.
  1700  func newFundingShimAssembler(chanPointShim *lnrpc.ChanPointShim, initiator bool,
  1701  	keyRing keychain.KeyRing) (chanfunding.Assembler, error) {
  1702  
  1703  	// Perform some basic sanity checks to ensure that all the expected
  1704  	// fields are populated.
  1705  	switch {
  1706  	case chanPointShim.RemoteKey == nil:
  1707  		return nil, fmt.Errorf("remote key not set")
  1708  
  1709  	case chanPointShim.LocalKey == nil:
  1710  		return nil, fmt.Errorf("local key desc not set")
  1711  
  1712  	case chanPointShim.LocalKey.RawKeyBytes == nil:
  1713  		return nil, fmt.Errorf("local raw key bytes not set")
  1714  
  1715  	case chanPointShim.LocalKey.KeyLoc == nil:
  1716  		return nil, fmt.Errorf("local key loc not set")
  1717  
  1718  	case chanPointShim.ChanPoint == nil:
  1719  		return nil, fmt.Errorf("chan point not set")
  1720  
  1721  	case len(chanPointShim.PendingChanId) != 32:
  1722  		return nil, fmt.Errorf("pending chan ID not set")
  1723  	}
  1724  
  1725  	// First, we'll map the RPC's channel point to one we can actually use.
  1726  	index := chanPointShim.ChanPoint.OutputIndex
  1727  	txid, err := lnrpc.GetChanPointFundingTxid(chanPointShim.ChanPoint)
  1728  	if err != nil {
  1729  		return nil, err
  1730  	}
  1731  	chanPoint := wire.NewOutPoint(txid, index, wire.TxTreeRegular)
  1732  
  1733  	// Next we'll parse out the remote party's funding key, as well as our
  1734  	// full key descriptor.
  1735  	remoteKey, err := secp256k1.ParsePubKey(
  1736  		chanPointShim.RemoteKey,
  1737  	)
  1738  	if err != nil {
  1739  		return nil, err
  1740  	}
  1741  
  1742  	shimKeyDesc := chanPointShim.LocalKey
  1743  	localKey, err := secp256k1.ParsePubKey(
  1744  		shimKeyDesc.RawKeyBytes,
  1745  	)
  1746  	if err != nil {
  1747  		return nil, err
  1748  	}
  1749  	localKeyDesc := keychain.KeyDescriptor{
  1750  		PubKey: localKey,
  1751  		KeyLocator: keychain.KeyLocator{
  1752  			Family: keychain.KeyFamily(
  1753  				shimKeyDesc.KeyLoc.KeyFamily,
  1754  			),
  1755  			Index: uint32(shimKeyDesc.KeyLoc.KeyIndex),
  1756  		},
  1757  	}
  1758  
  1759  	// Verify that if we re-derive this key according to the passed
  1760  	// KeyLocator, that we get the exact same key back. Otherwise, we may
  1761  	// end up in a situation where we aren't able to actually sign for this
  1762  	// newly created channel.
  1763  	derivedKey, err := keyRing.DeriveKey(localKeyDesc.KeyLocator)
  1764  	if err != nil {
  1765  		return nil, err
  1766  	}
  1767  	if !derivedKey.PubKey.IsEqual(localKey) {
  1768  		return nil, fmt.Errorf("KeyLocator does not match attached " +
  1769  			"raw pubkey")
  1770  	}
  1771  
  1772  	// With all the parts assembled, we can now make the canned assembler
  1773  	// to pass into the wallet.
  1774  	return chanfunding.NewCannedAssembler(
  1775  		chanPointShim.ThawHeight, *chanPoint,
  1776  		dcrutil.Amount(chanPointShim.Amt), &localKeyDesc,
  1777  		remoteKey, initiator,
  1778  	), nil
  1779  }
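
        // For reference, a FundingShim carrying a ChanPointShim that satisfies
        // the sanity checks above might look as follows (hypothetical values;
        // the pending channel ID must be exactly 32 bytes and the local key
        // locator must re-derive to the attached raw pubkey):
        //
        //	shim := &lnrpc.FundingShim{
        //		Shim: &lnrpc.FundingShim_ChanPointShim{
        //			ChanPointShim: &lnrpc.ChanPointShim{
        //				Amt:           int64(fundingAmt),
        //				ChanPoint:     chanPoint,        // *lnrpc.ChannelPoint of the funding output
        //				LocalKey:      localKeyDesc,     // *lnrpc.KeyDescriptor (raw bytes + key locator)
        //				RemoteKey:     remoteKeyBytes,   // remote party's funding pubkey
        //				PendingChanId: pendingChanID[:], // 32 bytes
        //			},
        //		},
        //	}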
  1780  
  1781  // newPsbtAssembler returns a new fully populated chanfunding.PsbtAssembler
  1782  // using a FundingShim obtained from an RPC caller.
  1783  func newPsbtAssembler(req *lnrpc.OpenChannelRequest, normalizedMinConfs int32,
  1784  	psbtShim *lnrpc.PsbtShim, netParams *chaincfg.Params) (
  1785  	chanfunding.Assembler, error) {
  1786  
  1787  	var (
  1788  		packet *psbt.Packet
  1789  		err    error
  1790  	)
  1791  
  1792  	// Perform some basic sanity checks to ensure that all the expected
  1793  	// fields are populated and none of the incompatible fields are.
  1794  	if len(psbtShim.PendingChanId) != 32 {
  1795  		return nil, fmt.Errorf("pending chan ID not set")
  1796  	}
  1797  	if normalizedMinConfs != 1 {
  1798  		return nil, fmt.Errorf("setting non-default values for " +
  1799  			"minimum confirmation is not supported for PSBT " +
  1800  			"funding")
  1801  	}
  1802  	if req.AtomsPerByte != 0 || req.TargetConf != 0 {
  1803  		return nil, fmt.Errorf("specifying fee estimation parameters " +
  1804  			"is not supported for PSBT funding")
  1805  	}
  1806  
  1807  	// The base PSBT is optional. But if it's set, it has to be a valid,
  1808  	// binary serialized PSBT.
  1809  	if len(psbtShim.BasePsbt) > 0 {
  1810  		packet, err = psbt.NewFromRawBytes(
  1811  			bytes.NewReader(psbtShim.BasePsbt), false,
  1812  		)
  1813  		if err != nil {
  1814  			return nil, fmt.Errorf("error parsing base PSBT: %v",
  1815  				err)
  1816  		}
  1817  	}
  1818  
  1819  	// With all the parts assembled, we can now make the canned assembler
  1820  	// to pass into the wallet.
  1821  	return chanfunding.NewPsbtAssembler(
  1822  		dcrutil.Amount(req.LocalFundingAmount), packet, netParams,
  1823  		!psbtShim.NoPublish,
  1824  	), nil
  1825  }
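
        // A corresponding PsbtShim sketch (hypothetical values): note that, per
        // the checks above, fee estimation parameters and a non-default minimum
        // confirmation count cannot be combined with PSBT funding.
        //
        //	shim := &lnrpc.FundingShim{
        //		Shim: &lnrpc.FundingShim_PsbtShim{
        //			PsbtShim: &lnrpc.PsbtShim{
        //				PendingChanId: pendingChanID[:], // 32 bytes
        //				BasePsbt:      basePsbt,         // optional binary-serialized base PSBT
        //				NoPublish:     false,            // allow publishing the final funding tx
        //			},
        //		},
        //	}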
  1826  
  1827  // canOpenChannel returns an error if the necessary subsystems for channel
  1828  // funding are not ready.
  1829  func (r *rpcServer) canOpenChannel() error {
  1830  	// We can't open a channel until the main server has started.
  1831  	if !r.server.Started() {
  1832  		return ErrServerNotActive
  1833  	}
  1834  
  1835  	// Creation of channels before the wallet syncs up is currently
  1836  	// disallowed.
  1837  	isSynced, _, err := r.server.cc.Wallet.IsSynced()
  1838  	if err != nil {
  1839  		return err
  1840  	}
  1841  	if !isSynced {
  1842  		return errors.New("channels cannot be created before the " +
  1843  			"wallet is fully synced")
  1844  	}
  1845  
  1846  	return nil
  1847  }
  1848  
  1849  // parseOpenChannelReq parses an OpenChannelRequest message into an InitFundingMsg
  1850  // struct. The logic is abstracted so that it can be shared between OpenChannel
  1851  // and OpenChannelSync.
  1852  func (r *rpcServer) parseOpenChannelReq(in *lnrpc.OpenChannelRequest,
  1853  	isSync bool) (*funding.InitFundingMsg, error) {
  1854  
  1855  	rpcsLog.Debugf("[openchannel] request to NodeKey(%x) "+
  1856  		"allocation(us=%v, them=%v)", in.NodePubkey,
  1857  		in.LocalFundingAmount, in.PushAtoms)
  1858  
  1859  	localFundingAmt := dcrutil.Amount(in.LocalFundingAmount)
  1860  	remoteInitialBalance := dcrutil.Amount(in.PushAtoms)
  1861  	minHtlcIn := lnwire.MilliAtom(in.MinHtlcMAtoms)
  1862  	remoteCsvDelay := uint16(in.RemoteCsvDelay)
  1863  	maxValue := lnwire.MilliAtom(in.RemoteMaxValueInFlightMAtoms)
  1864  	maxHtlcs := uint16(in.RemoteMaxHtlcs)
  1865  
  1866  	globalFeatureSet := r.server.featureMgr.Get(feature.SetNodeAnn)
  1867  
  1868  	// Ensure that the initial balance of the remote party (if pushing
  1869  	// atoms) does not exceed the amount the local party has requested
  1870  	// for funding.
  1871  	//
  1872  	// TODO(roasbeef): incorporate base fee?
  1873  	if remoteInitialBalance >= localFundingAmt {
  1874  		return nil, fmt.Errorf("amount pushed to remote peer for " +
  1875  			"initial state must be below the local funding amount")
  1876  	}
  1877  
  1878  	// Ensure that the user doesn't exceed the current soft-limit for
  1879  	// channel size. If the funding amount is above the soft-limit, then
  1880  	// we'll reject the request.
  1881  	wumboEnabled := globalFeatureSet.HasFeature(
  1882  		lnwire.WumboChannelsOptional,
  1883  	)
  1884  	if !wumboEnabled && localFundingAmt > MaxFundingAmount {
  1885  		return nil, fmt.Errorf("funding amount is too large, the max "+
  1886  			"channel size is: %v", MaxFundingAmount)
  1887  	}
  1888  
  1889  	// Restrict the size of the channel we'll actually open. At a later
  1890  	// level, we'll ensure that the output we create, after accounting for
  1891  	// fees, isn't a dust output.
  1892  	if localFundingAmt < funding.MinChanFundingSize {
  1893  		return nil, fmt.Errorf("channel is too small, the minimum "+
  1894  			"channel size is: %v atoms", int64(funding.MinChanFundingSize))
  1895  	}
  1896  
  1897  	// Prevent users from submitting a max-htlc value that would exceed the
  1898  	// protocol maximum.
  1899  	if maxHtlcs > input.MaxHTLCNumber/2 {
  1900  		return nil, fmt.Errorf("remote-max-htlcs (%v) cannot be "+
  1901  			"greater than %v", maxHtlcs, input.MaxHTLCNumber/2)
  1902  	}
  1903  
  1904  	// Then, we'll extract the minimum number of confirmations that each
  1905  	// output we use to fund the channel's funding transaction should
  1906  	// satisfy.
  1907  	minConfs, err := lnrpc.ExtractMinConfs(in.MinConfs, in.SpendUnconfirmed)
  1908  	if err != nil {
  1909  		return nil, err
  1910  	}
  1911  
  1912  	// TODO(roasbeef): also return channel ID?
  1913  
  1914  	var nodePubKey *secp256k1.PublicKey
  1915  
  1916  	// Parse the remote pubkey from the NodePubkey field of the request. If
  1917  	// it's not present, we'll fall back to the deprecated hex-string field
  1918  	// used by REST clients for backwards compatibility.
  1919  	switch {
  1920  
  1921  	// Parse the raw bytes of the node key into a pubkey object so we can
  1922  	// easily manipulate it.
  1923  	case len(in.NodePubkey) > 0:
  1924  		nodePubKey, err = secp256k1.ParsePubKey(in.NodePubkey)
  1925  		if err != nil {
  1926  			return nil, err
  1927  		}
  1928  
  1929  	// Decode the provided target node's public key, parsing it into a pub
  1930  	// key object. For all sync calls, byte slices are expected to be encoded
  1931  	// as hex strings.
  1932  	case isSync:
  1933  		keyBytes, err := hex.DecodeString(in.NodePubkeyString)
  1934  		if err != nil {
  1935  			return nil, err
  1936  		}
  1937  
  1938  		nodePubKey, err = secp256k1.ParsePubKey(keyBytes)
  1939  		if err != nil {
  1940  			return nil, err
  1941  		}
  1942  
  1943  	default:
  1944  		return nil, fmt.Errorf("NodePubkey is not set")
  1945  	}
  1946  
  1947  	// Making a channel to ourselves wouldn't be of any use, so we
  1948  	// explicitly disallow it.
  1949  	if nodePubKey.IsEqual(r.server.identityECDH.PubKey()) {
  1950  		return nil, fmt.Errorf("cannot open channel to self")
  1951  	}
  1952  
  1953  	// Calculate an appropriate fee rate for this transaction.
  1954  	feeRate, err := calculateFeeRate(
  1955  		uint64(in.AtomsPerByte),
  1956  		uint32(in.TargetConf), r.server.cc.FeeEstimator,
  1957  	)
  1958  	if err != nil {
  1959  		return nil, err
  1960  	}
  1961  
  1962  	rpcsLog.Debugf("[openchannel]: using fee of %v atom/kB for funding tx",
  1963  		int64(feeRate))
  1964  
  1965  	script, err := chancloser.ParseUpfrontShutdownAddress(
  1966  		in.CloseAddress, r.cfg.ActiveNetParams.Params,
  1967  	)
  1968  	if err != nil {
  1969  		return nil, fmt.Errorf("error parsing upfront shutdown: %v",
  1970  			err)
  1971  	}
  1972  
  1973  	var channelType *lnwire.ChannelType
  1974  	switch in.CommitmentType {
  1975  	case lnrpc.CommitmentType_UNKNOWN_COMMITMENT_TYPE:
  1976  		break
  1977  
  1978  	case lnrpc.CommitmentType_LEGACY:
  1979  		channelType = new(lnwire.ChannelType)
  1980  		*channelType = lnwire.ChannelType(*lnwire.NewRawFeatureVector())
  1981  
  1982  	case lnrpc.CommitmentType_STATIC_REMOTE_KEY:
  1983  		channelType = new(lnwire.ChannelType)
  1984  		*channelType = lnwire.ChannelType(*lnwire.NewRawFeatureVector(
  1985  			lnwire.StaticRemoteKeyRequired,
  1986  		))
  1987  
  1988  	case lnrpc.CommitmentType_ANCHORS:
  1989  		channelType = new(lnwire.ChannelType)
  1990  		*channelType = lnwire.ChannelType(*lnwire.NewRawFeatureVector(
  1991  			lnwire.StaticRemoteKeyRequired,
  1992  			lnwire.AnchorsZeroFeeHtlcTxRequired,
  1993  		))
  1994  
  1995  	case lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE:
  1996  		channelType = new(lnwire.ChannelType)
  1997  		*channelType = lnwire.ChannelType(*lnwire.NewRawFeatureVector(
  1998  			lnwire.StaticRemoteKeyRequired,
  1999  			lnwire.AnchorsZeroFeeHtlcTxRequired,
  2000  			lnwire.ScriptEnforcedLeaseRequired,
  2001  		))
  2002  
  2003  	default:
  2004  		return nil, fmt.Errorf("unhandled request channel type %v",
  2005  			in.CommitmentType)
  2006  	}
  2007  
  2008  	// Instruct the server to trigger the necessary events to attempt to
  2009  	// open a new channel. A stream is returned in place, this stream will
  2010  	// be used to consume updates of the state of the pending channel.
  2011  	return &funding.InitFundingMsg{
  2012  		TargetPubkey:     nodePubKey,
  2013  		ChainHash:        r.cfg.ActiveNetParams.GenesisHash,
  2014  		LocalFundingAmt:  localFundingAmt,
  2015  		PushAmt:          lnwire.NewMAtomsFromAtoms(remoteInitialBalance),
  2016  		MinHtlcIn:        minHtlcIn,
  2017  		FundingFeePerKB:  feeRate,
  2018  		Private:          in.Private,
  2019  		RemoteCsvDelay:   remoteCsvDelay,
  2020  		MinConfs:         minConfs,
  2021  		ShutdownScript:   script,
  2022  		MaxValueInFlight: maxValue,
  2023  		MaxHtlcs:         maxHtlcs,
  2024  		MaxLocalCsv:      uint16(in.MaxLocalCsv),
  2025  		ChannelType:      channelType,
  2026  	}, nil
  2027  }
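
        // A hypothetical request that passes the validation above (values for
        // illustration only): the pushed amount stays below the local funding
        // amount, which in turn must lie between funding.MinChanFundingSize and
        // MaxFundingAmount, and the node key is the raw 33-byte serialization.
        //
        //	req := &lnrpc.OpenChannelRequest{
        //		NodePubkey:         nodeKeyBytes, // 33-byte compressed pubkey
        //		LocalFundingAmount: 1000000,      // atoms committed by us
        //		PushAtoms:          100000,       // must be < LocalFundingAmount
        //		TargetConf:         6,            // funding tx fee estimation target
        //	}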
  2028  
  2029  // OpenChannel attempts to open a singly funded channel specified in the
  2030  // request to a remote peer.
  2031  func (r *rpcServer) OpenChannel(in *lnrpc.OpenChannelRequest,
  2032  	updateStream lnrpc.Lightning_OpenChannelServer) error {
  2033  
  2034  	if err := r.canOpenChannel(); err != nil {
  2035  		return err
  2036  	}
  2037  
  2038  	req, err := r.parseOpenChannelReq(in, false)
  2039  	if err != nil {
  2040  		return err
  2041  	}
  2042  
  2043  	// If the user has provided a shim, then we'll now augment the base
  2044  	// open channel request with this additional logic.
  2045  	if in.FundingShim != nil {
  2046  		switch {
  2047  		// If we have a chan point shim, then this means the funding
  2048  		// transaction was crafted externally. In this case we only
  2049  		// need to hand a channel point down into the wallet.
  2050  		case in.FundingShim.GetChanPointShim() != nil:
  2051  			chanPointShim := in.FundingShim.GetChanPointShim()
  2052  
  2053  			// Map the channel point shim into a new
  2054  			// chanfunding.CannedAssembler that the wallet will use
  2055  			// to obtain the channel point details.
  2056  			copy(req.PendingChanID[:], chanPointShim.PendingChanId)
  2057  			req.ChanFunder, err = newFundingShimAssembler(
  2058  				chanPointShim, true, r.server.cc.KeyRing,
  2059  			)
  2060  			if err != nil {
  2061  				return err
  2062  			}
  2063  
  2064  		// If we have a PSBT shim, then this means the funding
  2065  		// transaction will be crafted outside of the wallet, once the
  2066  		// funding multisig output script is known. We'll create an
  2067  		// intent that will supervise the multi-step process.
  2068  		case in.FundingShim.GetPsbtShim() != nil:
  2069  			psbtShim := in.FundingShim.GetPsbtShim()
  2070  
  2071  			// Instruct the wallet to use the new
  2072  			// chanfunding.PsbtAssembler to construct the funding
  2073  			// transaction.
  2074  			copy(req.PendingChanID[:], psbtShim.PendingChanId)
  2075  			req.ChanFunder, err = newPsbtAssembler(
  2076  				in, req.MinConfs, psbtShim,
  2077  				&r.server.cc.Wallet.Cfg.NetParams,
  2078  			)
  2079  			if err != nil {
  2080  				return err
  2081  			}
  2082  		}
  2083  	}
  2084  
  2085  	updateChan, errChan := r.server.OpenChannel(req)
  2086  
  2087  	var outpoint wire.OutPoint
  2088  out:
  2089  	for {
  2090  		select {
  2091  		case err := <-errChan:
  2092  			rpcsLog.Errorf("unable to open channel to NodeKey(%x): %v",
  2093  				req.TargetPubkey.SerializeCompressed(), err)
  2094  			return err
  2095  		case fundingUpdate := <-updateChan:
  2096  			rpcsLog.Tracef("[openchannel] sending update: %v",
  2097  				fundingUpdate)
  2098  			if err := updateStream.Send(fundingUpdate); err != nil {
  2099  				return err
  2100  			}
  2101  
  2102  			// If a final channel open update is being sent, then
  2103  			// we can break out of our recv loop as we no longer
  2104  			// need to process any further updates.
  2105  			update, ok := fundingUpdate.Update.(*lnrpc.OpenStatusUpdate_ChanOpen)
  2106  			if ok {
  2107  				chanPoint := update.ChanOpen.ChannelPoint
  2108  				txid, err := lnrpc.GetChanPointFundingTxid(chanPoint)
  2109  				if err != nil {
  2110  					return err
  2111  				}
  2112  				outpoint = wire.OutPoint{
  2113  					Hash:  *txid,
  2114  					Index: chanPoint.OutputIndex,
  2115  				}
  2116  
  2117  				break out
  2118  			}
  2119  		case <-r.quit:
  2120  			return nil
  2121  		}
  2122  	}
  2123  
  2124  	rpcsLog.Tracef("[openchannel] success NodeKey(%x), ChannelPoint(%v)",
  2125  		req.TargetPubkey.SerializeCompressed(), outpoint)
  2126  	return nil
  2127  }
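
        // A hypothetical client-side loop mirroring the flow above: read updates
        // from the stream until the final ChanOpen update arrives (sketch only;
        // client, ctx and req are assumed to exist).
        //
        //	stream, err := client.OpenChannel(ctx, req)
        //	if err != nil {
        //		return err
        //	}
        //	for {
        //		update, err := stream.Recv()
        //		if err != nil {
        //			return err
        //		}
        //		if _, ok := update.Update.(*lnrpc.OpenStatusUpdate_ChanOpen); ok {
        //			break // channel is confirmed and fully open
        //		}
        //	}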
  2128  
  2129  // OpenChannelSync is a synchronous version of the OpenChannel RPC call. This
  2130  // call is meant to be consumed by clients to the REST proxy. As with all other
  2131  // sync calls, all byte slices are instead to be populated as hex encoded
  2132  // strings.
  2133  func (r *rpcServer) OpenChannelSync(ctx context.Context,
  2134  	in *lnrpc.OpenChannelRequest) (*lnrpc.ChannelPoint, error) {
  2135  
  2136  	if err := r.canOpenChannel(); err != nil {
  2137  		return nil, err
  2138  	}
  2139  
  2140  	req, err := r.parseOpenChannelReq(in, true)
  2141  	if err != nil {
  2142  		return nil, err
  2143  	}
  2144  
  2145  	updateChan, errChan := r.server.OpenChannel(req)
  2146  	select {
  2147  	// If an error occurs, then immediately return the error to the client.
  2148  	case err := <-errChan:
  2149  		rpcsLog.Errorf("unable to open channel to NodeKey(%x): %v",
  2150  			req.TargetPubkey.SerializeCompressed(), err)
  2151  		return nil, err
  2152  
  2153  	// Otherwise, wait for the first channel update. The first update sent
  2154  	// is when the funding transaction is broadcast to the network.
  2155  	case fundingUpdate := <-updateChan:
  2156  		rpcsLog.Tracef("[openchannel] sending update: %v",
  2157  			fundingUpdate)
  2158  
  2159  		// Parse out the txid of the pending funding transaction. The
  2160  		// sync client can use this to poll against the list of
  2161  		// PendingChannels.
  2162  		openUpdate := fundingUpdate.Update.(*lnrpc.OpenStatusUpdate_ChanPending)
  2163  		chanUpdate := openUpdate.ChanPending
  2164  
  2165  		return &lnrpc.ChannelPoint{
  2166  			FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
  2167  				FundingTxidBytes: chanUpdate.Txid,
  2168  			},
  2169  			OutputIndex: chanUpdate.OutputIndex,
  2170  		}, nil
  2171  	case <-r.quit:
  2172  		return nil, nil
  2173  	}
  2174  }
  2175  
  2176  // BatchOpenChannel attempts to open multiple single-funded channels in a
  2177  // single transaction in an atomic way. This means either all channel open
  2178  // requests succeed at once or all attempts are aborted if any of them fail.
  2179  // This is the safer variant of using PSBTs to manually fund a batch of
  2180  // channels through the OpenChannel RPC.
  2181  func (r *rpcServer) BatchOpenChannel(ctx context.Context,
  2182  	in *lnrpc.BatchOpenChannelRequest) (*lnrpc.BatchOpenChannelResponse,
  2183  	error) {
  2184  
  2185  	if err := r.canOpenChannel(); err != nil {
  2186  		return nil, err
  2187  	}
  2188  
  2189  	// We need the wallet kit server to do the heavy lifting on the PSBT
  2190  	// part. If we didn't rely on re-using the wallet kit server's logic we
  2191  	// would need to re-implement everything here. Since we deliver lnd with
  2192  	// the wallet kit server enabled by default we can assume it's okay to
  2193  	// make this functionality dependent on that server being active.
  2194  	var walletKitServer walletrpc.WalletKitServer
  2195  	for _, subServer := range r.subServers {
  2196  		if subServer.Name() == walletrpc.SubServerName {
  2197  			walletKitServer = subServer.(walletrpc.WalletKitServer)
  2198  		}
  2199  	}
  2200  	if walletKitServer == nil {
  2201  		return nil, fmt.Errorf("batch channel open is only possible " +
  2202  			"if walletrpc subserver is active")
  2203  	}
  2204  
  2205  	rpcsLog.Debugf("[batchopenchannel] request to open batch of %d "+
  2206  		"channels", len(in.Channels))
  2207  
  2208  	// Make sure there is at least one channel to open. We could say we want
  2209  	// at least two channels for a batch. But maybe it's nice if developers
  2210  	// can use the same API for a single channel as well as a batch of
  2211  	// channels.
  2212  	if len(in.Channels) == 0 {
  2213  		return nil, fmt.Errorf("specify at least one channel")
  2214  	}
  2215  
  2216  	// In case we remove a pending channel from the database, we need to set
  2217  	// a close height, so we'll just use the current best known height.
  2218  	_, bestHeight, err := r.server.cc.ChainIO.GetBestBlock()
  2219  	if err != nil {
  2220  		return nil, fmt.Errorf("error fetching best block: %v", err)
  2221  	}
  2222  
  2223  	// So far everything looks good and we can now start the heavy lifting
  2224  	// that's done in the funding package.
  2225  	requestParser := func(req *lnrpc.OpenChannelRequest) (
  2226  		*funding.InitFundingMsg, error) {
  2227  
  2228  		return r.parseOpenChannelReq(req, false)
  2229  	}
  2230  	channelAbandoner := func(point *wire.OutPoint) error {
  2231  		return r.abandonChan(point, uint32(bestHeight))
  2232  	}
  2233  	batcher := funding.NewBatcher(&funding.BatchConfig{
  2234  		RequestParser:    requestParser,
  2235  		ChannelAbandoner: channelAbandoner,
  2236  		ChannelOpener:    r.server.OpenChannel,
  2237  		WalletKitServer:  walletKitServer,
  2238  		Wallet:           r.server.cc.Wallet,
  2239  		NetParams:        &r.server.cc.Wallet.Cfg.NetParams,
  2240  		Quit:             r.quit,
  2241  	})
  2242  	rpcPoints, err := batcher.BatchFund(ctx, in)
  2243  	if err != nil {
  2244  		return nil, fmt.Errorf("batch funding failed: %v", err)
  2245  	}
  2246  
  2247  	// Now all that's left to do is send back the response with the channel
  2248  	// points we created.
  2249  	return &lnrpc.BatchOpenChannelResponse{
  2250  		PendingChannels: rpcPoints,
  2251  	}, nil
  2252  }
  2253  
  2254  // CloseChannel attempts to close an active channel identified by its channel
  2255  // point. The actions of this method can additionally be augmented to attempt
  2256  // a force close after a timeout period in the case of an inactive peer.
  2257  func (r *rpcServer) CloseChannel(in *lnrpc.CloseChannelRequest,
  2258  	updateStream lnrpc.Lightning_CloseChannelServer) error {
  2259  
  2260  	if !r.server.Started() {
  2261  		return ErrServerNotActive
  2262  	}
  2263  
  2264  	// If the user didn't specify a channel point, then we'll reject this
  2265  	// request all together.
  2266  	if in.GetChannelPoint() == nil {
  2267  		return fmt.Errorf("must specify channel point in close channel")
  2268  	}
  2269  
  2270  	// If force closing a channel, the fee set in the commitment transaction
  2271  	// is used.
  2272  	if in.Force && (in.AtomsPerByte != 0 || in.TargetConf != 0) {
  2273  		return fmt.Errorf("force closing a channel uses a pre-defined fee")
  2274  	}
  2275  
  2276  	force := in.Force
  2277  	index := in.ChannelPoint.OutputIndex
  2278  	txid, err := lnrpc.GetChanPointFundingTxid(in.GetChannelPoint())
  2279  	if err != nil {
  2280  		rpcsLog.Errorf("[closechannel] unable to get funding txid: %v", err)
  2281  		return err
  2282  	}
  2283  	chanPoint := wire.NewOutPoint(txid, index, wire.TxTreeRegular)
  2284  
  2285  	rpcsLog.Tracef("[closechannel] request for ChannelPoint(%v), force=%v",
  2286  		chanPoint, force)
  2287  
  2288  	var (
  2289  		updateChan chan interface{}
  2290  		errChan    chan error
  2291  	)
  2292  
  2293  	// TODO(roasbeef): if force and peer online then don't force?
  2294  
  2295  	// First, we'll fetch the channel as is, as we'll need to examine it
  2296  	// regardless of if this is a force close or not.
  2297  	channel, err := r.server.chanStateDB.FetchChannel(nil, *chanPoint)
  2298  	if err != nil {
  2299  		return err
  2300  	}
  2301  
  2302  	// We can't co-op or force close restored channels or channels that have
  2303  	// experienced local data loss. Normally we would detect this in the
  2304  	// channel arbitrator if the channel has the status
  2305  	// ChanStatusLocalDataLoss after connecting to its peer. But if no
  2306  	// connection can be established, the channel arbitrator doesn't know it
  2307  	// can't be force closed yet.
  2308  	if channel.HasChanStatus(channeldb.ChanStatusRestored) ||
  2309  		channel.HasChanStatus(channeldb.ChanStatusLocalDataLoss) {
  2310  
  2311  		return fmt.Errorf("cannot close channel with state: %v",
  2312  			channel.ChanStatus())
  2313  	}
  2314  
  2315  	// Retrieve the best height of the chain, which we'll use to complete
  2316  	// either closing flow.
  2317  	_, bestHeight, err := r.server.cc.ChainIO.GetBestBlock()
  2318  	if err != nil {
  2319  		return err
  2320  	}
  2321  
  2322  	// If a force closure was requested, then we'll handle all the details
  2323  	// around the creation and broadcast of the unilateral closure
  2324  	// transaction here rather than going to the switch as we don't require
  2325  	// interaction from the peer.
  2326  	if force {
  2327  
  2328  		// As we're force closing this channel, as a precaution, we'll
  2329  		// ensure that the switch doesn't continue to see this channel
  2330  		// as eligible for forwarding HTLC's. If the peer is online,
  2331  		// then we'll also purge all of its indexes.
  2332  		remotePub := channel.IdentityPub
  2333  		if peer, err := r.server.FindPeer(remotePub); err == nil {
  2334  			// TODO(roasbeef): actually get the active channel
  2335  			// instead too?
  2336  			//  * so only need to grab from database
  2337  			peer.WipeChannel(&channel.FundingOutpoint)
  2338  		} else {
  2339  			chanID := lnwire.NewChanIDFromOutPoint(&channel.FundingOutpoint)
  2340  			r.server.htlcSwitch.RemoveLink(chanID)
  2341  		}
  2342  
  2343  		// With the necessary indexes cleaned up, we'll now force close
  2344  		// the channel.
  2345  		chainArbitrator := r.server.chainArb
  2346  		closingTx, err := chainArbitrator.ForceCloseContract(
  2347  			*chanPoint,
  2348  		)
  2349  		if err != nil {
  2350  			rpcsLog.Errorf("unable to force close transaction: %v", err)
  2351  			return err
  2352  		}
  2353  
  2354  		closingTxid := closingTx.TxHash()
  2355  
  2356  		// With the transaction broadcast, we send our first update to
  2357  		// the client.
  2358  		updateChan = make(chan interface{}, 2)
  2359  		updateChan <- &peer.PendingUpdate{
  2360  			Txid: closingTxid[:],
  2361  		}
  2362  
  2363  		errChan = make(chan error, 1)
  2364  		notifier := r.server.cc.ChainNotifier
  2365  		go peer.WaitForChanToClose(uint32(bestHeight), notifier, errChan, chanPoint,
  2366  			&closingTxid, closingTx.TxOut[0].PkScript, func() {
  2367  				// Respond to the local subsystem which
  2368  				// requested the channel closure.
  2369  				updateChan <- &peer.ChannelCloseUpdate{
  2370  					ClosingTxid: closingTxid[:],
  2371  					Success:     true,
  2372  				}
  2373  			})
  2374  	} else {
  2375  		// If this is a frozen channel, then as the channel initiator we
  2376  		// only allow the co-op close to proceed once the absolute thaw
  2377  		// height has been met; the responder may close at any time.
  2378  		if channel.IsInitiator {
  2379  			absoluteThawHeight, err := channel.AbsoluteThawHeight()
  2380  			if err != nil {
  2381  				return err
  2382  			}
  2383  			if uint32(bestHeight) < absoluteThawHeight {
  2384  				return fmt.Errorf("cannot co-op close frozen "+
  2385  					"channel as initiator until height=%v, "+
  2386  					"(current_height=%v)",
  2387  					absoluteThawHeight, bestHeight)
  2388  			}
  2389  		}
  2390  
  2391  		// If the link is not known by the switch, we cannot gracefully close
  2392  		// the channel.
  2393  		channelID := lnwire.NewChanIDFromOutPoint(chanPoint)
  2394  		if _, err := r.server.htlcSwitch.GetLink(channelID); err != nil {
  2395  			rpcsLog.Debugf("Trying to non-force close offline channel with "+
  2396  				"chan_point=%v", chanPoint)
  2397  			return fmt.Errorf("unable to gracefully close channel while peer "+
  2398  				"is offline (try force closing it instead): %v", err)
  2399  		}
  2400  
  2401  		// Calculate an appropriate fee rate for this transaction.
  2402  		feeRate, err := calculateFeeRate(
  2403  			uint64(in.AtomsPerByte),
  2404  			uint32(in.TargetConf), r.server.cc.FeeEstimator,
  2405  		)
  2406  		if err != nil {
  2407  			return err
  2408  		}
  2409  
  2410  		rpcsLog.Debugf("Target atom/kB for closing transaction: %v",
  2411  			int64(feeRate))
  2412  
  2413  		// Before we attempt the cooperative channel closure, we'll
  2414  		// examine the channel to ensure that it doesn't have a
  2415  		// lingering HTLC.
  2416  		if len(channel.ActiveHtlcs()) != 0 {
  2417  			return fmt.Errorf("cannot co-op close channel " +
  2418  				"with active htlcs")
  2419  		}
  2420  
  2421  		// Otherwise, the caller has requested a regular interactive
  2422  		// cooperative channel closure. So we'll forward the request to
  2423  		// the htlc switch which will handle the negotiation and
  2424  		// broadcast details.
  2425  
  2426  		var deliveryScript lnwire.DeliveryAddress
  2427  
  2428  		// If a delivery address to close out to was specified, decode it.
  2429  		if len(in.DeliveryAddress) > 0 {
  2430  			// Decode the address provided.
  2431  			addr, err := stdaddr.DecodeAddress(
  2432  				in.DeliveryAddress, r.cfg.ActiveNetParams.Params,
  2433  			)
  2434  			if err != nil {
  2435  				return fmt.Errorf("invalid delivery address: %v", err)
  2436  			}
  2437  
  2438  			// Create a script to pay out to the address provided.
  2439  			deliveryScript, err = input.PayToAddrScript(addr)
  2440  			if err != nil {
  2441  				return err
  2442  			}
  2443  		}
  2444  
  2445  		updateChan, errChan = r.server.htlcSwitch.CloseLink(
  2446  			chanPoint, contractcourt.CloseRegular, feeRate, deliveryScript,
  2447  		)
  2448  	}
  2449  out:
  2450  	for {
  2451  		select {
  2452  		case err := <-errChan:
  2453  			rpcsLog.Errorf("[closechannel] unable to close "+
  2454  				"ChannelPoint(%v): %v", chanPoint, err)
  2455  			return err
  2456  		case closingUpdate := <-updateChan:
  2457  			rpcClosingUpdate, err := createRPCCloseUpdate(
  2458  				closingUpdate,
  2459  			)
  2460  			if err != nil {
  2461  				return err
  2462  			}
  2463  
  2464  			rpcsLog.Tracef("[closechannel] sending update: %v",
  2465  				rpcClosingUpdate)
  2466  
  2467  			if updateStream != nil {
  2468  				if err := updateStream.Send(rpcClosingUpdate); err != nil {
  2469  					return err
  2470  				}
  2471  			}
  2472  
  2473  			// If a final channel closing update is being sent,
  2474  			// then we can break out of our dispatch loop as we no
  2475  			// longer need to process any further updates.
  2476  			switch closeUpdate := closingUpdate.(type) {
  2477  			case *peer.ChannelCloseUpdate:
  2478  				h, _ := chainhash.NewHash(closeUpdate.ClosingTxid)
  2479  				rpcsLog.Infof("[closechannel] close completed: "+
  2480  					"txid(%v)", h)
  2481  				break out
  2482  			}
  2483  		case <-r.quit:
  2484  			return nil
  2485  		}
  2486  	}
  2487  
  2488  	return nil
  2489  }
  2490  
  2491  func createRPCCloseUpdate(update interface{}) (
  2492  	*lnrpc.CloseStatusUpdate, error) {
  2493  
  2494  	switch u := update.(type) {
  2495  	case *peer.ChannelCloseUpdate:
  2496  		return &lnrpc.CloseStatusUpdate{
  2497  			Update: &lnrpc.CloseStatusUpdate_ChanClose{
  2498  				ChanClose: &lnrpc.ChannelCloseUpdate{
  2499  					ClosingTxid: u.ClosingTxid,
  2500  				},
  2501  			},
  2502  		}, nil
  2503  	case *peer.PendingUpdate:
  2504  		return &lnrpc.CloseStatusUpdate{
  2505  			Update: &lnrpc.CloseStatusUpdate_ClosePending{
  2506  				ClosePending: &lnrpc.PendingUpdate{
  2507  					Txid:        u.Txid,
  2508  					OutputIndex: u.OutputIndex,
  2509  				},
  2510  			},
  2511  		}, nil
  2512  	}
  2513  
  2514  	return nil, errors.New("unknown close status update")
  2515  }
  2516  
  2517  // abandonChanFromGraph attempts to remove a channel from the channel graph. If
  2518  // we can't find the chanID in the graph, then we assume it has already been
  2519  // removed, and will return a nop.
  2520  func abandonChanFromGraph(chanGraph *channeldb.ChannelGraph,
  2521  	chanPoint *wire.OutPoint) error {
  2522  
  2523  	// First, we'll obtain the channel ID. If we can't locate this, then
  2524  	// it's the case that the channel may have already been removed from
  2525  	// the graph, so we'll return a nil error.
  2526  	chanID, err := chanGraph.ChannelID(chanPoint)
  2527  	switch {
  2528  	case err == channeldb.ErrEdgeNotFound:
  2529  		return nil
  2530  	case err != nil:
  2531  		return err
  2532  	}
  2533  
  2534  	// If the channel ID is still in the graph, then that means the channel
  2535  	// is still open, so we'll now move to purge it from the graph.
  2536  	return chanGraph.DeleteChannelEdges(false, chanID)
  2537  }
  2538  
  2539  // abandonChan removes a channel from the database, graph and contract court.
  2540  func (r *rpcServer) abandonChan(chanPoint *wire.OutPoint,
  2541  	bestHeight uint32) error {
  2542  
  2543  	// Abandoning a channel is a three-step process: remove from the open
  2544  	// channel state, remove from the graph, remove from the contract
  2545  	// court. Between any step it's possible that the users restarts the
  2546  	// process all over again. As a result, each of the steps below are
  2547  	// intended to be idempotent.
  2548  	err := r.server.chanStateDB.AbandonChannel(chanPoint, bestHeight)
  2549  	if err != nil {
  2550  		return err
  2551  	}
  2552  	err = abandonChanFromGraph(r.server.graphDB, chanPoint)
  2553  	if err != nil {
  2554  		return err
  2555  	}
  2556  	err = r.server.chainArb.ResolveContract(*chanPoint)
  2557  	if err != nil {
  2558  		return err
  2559  	}
  2560  
  2561  	// If this channel was in the process of being closed, but didn't fully
  2562  	// close, then it's possible that the nursery is hanging on to some
  2563  	// state. To err on the side of caution, we'll now attempt to wipe any
  2564  	// state for this channel from the nursery.
  2565  	err = r.server.utxoNursery.RemoveChannel(chanPoint)
  2566  	if err != nil && err != contractcourt.ErrContractNotFound {
  2567  		return err
  2568  	}
  2569  
  2570  	// Finally, notify the backup listeners that the channel can be removed
  2571  	// from any channel backups.
  2572  	r.server.channelNotifier.NotifyClosedChannelEvent(*chanPoint)
  2573  
  2574  	return nil
  2575  }
  2576  
  2577  // AbandonChannel removes all channel state from the database except for a
  2578  // close summary. This method can be used to get rid of permanently unusable
  2579  // channels due to bugs fixed in newer versions of lnd.
  2580  func (r *rpcServer) AbandonChannel(_ context.Context,
  2581  	in *lnrpc.AbandonChannelRequest) (*lnrpc.AbandonChannelResponse, error) {
  2582  
  2583  	// If this isn't the dev build, then we won't allow the RPC to be
  2584  	// executed, as it's an advanced feature and won't be activated in
  2585  	// regular production/release builds except for the explicit case of
  2586  	// externally funded channels that are still pending. Due to repeated
  2587  	// requests, we also allow this requirement to be overwritten by a new
  2588  	// flag that attests to the user knowing what they're doing and the risk
  2589  	// associated with the command/RPC.
  2590  	if !in.IKnowWhatIAmDoing && !in.PendingFundingShimOnly &&
  2591  		!build.IsDevBuild() {
  2592  
  2593  		return nil, fmt.Errorf("AbandonChannel RPC call only " +
  2594  			"available in dev builds")
  2595  	}
  2596  
  2597  	// We'll parse out the arguments so we can obtain the chanPoint of the
  2598  	// target channel.
  2599  	txid, err := lnrpc.GetChanPointFundingTxid(in.GetChannelPoint())
  2600  	if err != nil {
  2601  		return nil, err
  2602  	}
  2603  	index := in.ChannelPoint.OutputIndex
  2604  	chanPoint := wire.NewOutPoint(txid, index, wire.TxTreeRegular)
  2605  
  2606  	// When we remove the channel from the database, we need to set a close
  2607  	// height, so we'll just use the current best known height.
  2608  	_, bestHeight, err := r.server.cc.ChainIO.GetBestBlock()
  2609  	if err != nil {
  2610  		return nil, err
  2611  	}
  2612  
  2613  	dbChan, err := r.server.chanStateDB.FetchChannel(nil, *chanPoint)
  2614  	switch {
  2615  	// If the channel isn't found in the set of open channels, then we can
  2616  	// continue on as it can't be loaded into the link/peer.
  2617  	case err == channeldb.ErrChannelNotFound:
  2618  		break
  2619  
  2620  	// If the channel is still known to be open, then before we modify any
  2621  	// on-disk state, we'll remove the channel from the switch and peer
  2622  	// state if it's been loaded in.
  2623  	case err == nil:
  2624  		// If the user requested the more safe version that only allows
  2625  		// the removal of externally (shim) funded channels that are
  2626  		// still pending, we enforce this option now that we know the
  2627  		// state of the channel.
  2628  		//
  2629  		// TODO(guggero): Properly store the funding type (wallet, shim,
  2630  		// PSBT) on the channel so we don't need to use the thaw height.
  2631  		isShimFunded := dbChan.ThawHeight > 0
  2632  		isPendingShimFunded := isShimFunded && dbChan.IsPending
  2633  		if !in.IKnowWhatIAmDoing && in.PendingFundingShimOnly &&
  2634  			!isPendingShimFunded {
  2635  
  2636  			return nil, fmt.Errorf("channel %v is not externally "+
  2637  				"funded or not pending", chanPoint)
  2638  		}
  2639  
  2640  		// We'll mark the channel as borked before we remove the state
  2641  		// from the switch/peer so it won't be loaded back in if the
  2642  		// peer reconnects.
  2643  		if err := dbChan.MarkBorked(); err != nil {
  2644  			return nil, err
  2645  		}
  2646  		remotePub := dbChan.IdentityPub
  2647  		if peer, err := r.server.FindPeer(remotePub); err == nil {
  2648  			peer.WipeChannel(chanPoint)
  2649  		}
  2650  
  2651  	default:
  2652  		return nil, err
  2653  	}
  2654  
  2655  	// Remove the channel from the graph, database and contract court.
  2656  	if err := r.abandonChan(chanPoint, uint32(bestHeight)); err != nil {
  2657  		return nil, err
  2658  	}
  2659  
  2660  	return &lnrpc.AbandonChannelResponse{}, nil
  2661  }
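
        // A hypothetical request sketch: outside of dev builds the caller must
        // either attest to the risk or restrict the call to pending, externally
        // (shim) funded channels, as enforced above.
        //
        //	req := &lnrpc.AbandonChannelRequest{
        //		ChannelPoint:           chanPoint, // *lnrpc.ChannelPoint of the channel to drop
        //		PendingFundingShimOnly: true,      // safer variant: only pending shim-funded channels
        //		// IKnowWhatIAmDoing: true,        // alternatively, accept the risk explicitly
        //	}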
  2662  
  2663  // GetInfo returns general information concerning the lightning node including
  2664  // its identity pubkey, alias, the chains it is connected to, and information
  2665  // concerning the number of open+pending channels.
  2666  func (r *rpcServer) GetInfo(_ context.Context,
  2667  	_ *lnrpc.GetInfoRequest) (*lnrpc.GetInfoResponse, error) {
  2668  
  2669  	serverPeers := r.server.Peers()
  2670  
  2671  	openChannels, err := r.server.chanStateDB.FetchAllOpenChannels()
  2672  	if err != nil {
  2673  		return nil, err
  2674  	}
  2675  
  2676  	var activeChannels uint32
  2677  	for _, channel := range openChannels {
  2678  		chanID := lnwire.NewChanIDFromOutPoint(&channel.FundingOutpoint)
  2679  		if r.server.htlcSwitch.HasActiveLink(chanID) {
  2680  			activeChannels++
  2681  		}
  2682  	}
  2683  
  2684  	inactiveChannels := uint32(len(openChannels)) - activeChannels
  2685  
  2686  	pendingChannels, err := r.server.chanStateDB.FetchPendingChannels()
  2687  	if err != nil {
  2688  		return nil, fmt.Errorf("unable to retrieve pending "+
  2689  			"channels: %v", err)
  2690  	}
  2691  	nPendingChannels := uint32(len(pendingChannels))
  2692  
  2693  	idPub := r.server.identityECDH.PubKey().SerializeCompressed()
  2694  	encodedIDPub := hex.EncodeToString(idPub)
  2695  
  2696  	bestHash, bestHeight, err := r.server.cc.ChainIO.GetBestBlock()
  2697  	if err != nil {
  2698  		return nil, fmt.Errorf("unable to get best block info: %v", err)
  2699  	}
  2700  
  2701  	isSynced, bestHeaderTimestamp, err := r.server.cc.Wallet.IsSynced()
  2702  	if err != nil {
  2703  		return nil, fmt.Errorf("unable to sync PoV of the wallet "+
  2704  			"with current best block in the main chain: %v", err)
  2705  	}
  2706  
  2707  	// If the router does full channel validation, it has a lot of work to
  2708  	// do for each block. So it might be possible that it isn't yet up to
  2709  	// date with the most recent block, even if the wallet is. This can
  2710  	// happen in environments with high CPU load (such as parallel itests).
  2711  	// Since the `synced_to_chain` flag in the response of this call is used
  2712  	// by many wallets (and also our itests) to make sure everything's up to
  2713  	// date, we add the router's state to it. So the flag will only toggle
  2714  	// to true once the router was also able to catch up.
  2715  	if !r.cfg.Routing.AssumeChannelValid {
  2716  		routerHeight := r.server.chanRouter.SyncedHeight()
  2717  		isSynced = isSynced && uint32(bestHeight) == routerHeight
  2718  	}
  2719  
  2720  	network := lncfg.NormalizeNetwork(r.cfg.ActiveNetParams.Name)
  2721  	activeChains := make([]*lnrpc.Chain, r.cfg.registeredChains.NumActiveChains())
  2722  	for i, chain := range r.cfg.registeredChains.ActiveChains() {
  2723  		activeChains[i] = &lnrpc.Chain{
  2724  			Chain:   chain.String(),
  2725  			Network: network,
  2726  		}
  2727  	}
  2728  
  2729  	// Check if external IP addresses were provided to lnd and use them
  2730  	// to set the URIs.
  2731  	nodeAnn, err := r.server.genNodeAnnouncement(false)
  2732  	if err != nil {
  2733  		return nil, fmt.Errorf("unable to retrieve current fully signed "+
  2734  			"node announcement: %v", err)
  2735  	}
  2736  	addrs := nodeAnn.Addresses
  2737  	uris := make([]string, len(addrs))
  2738  	for i, addr := range addrs {
  2739  		uris[i] = fmt.Sprintf("%s@%s", encodedIDPub, addr.String())
  2740  	}
  2741  
  2742  	isGraphSynced := r.server.authGossiper.SyncManager().IsGraphSynced()
  2743  
  2744  	features := make(map[uint32]*lnrpc.Feature)
  2745  	sets := r.server.featureMgr.ListSets()
  2746  
  2747  	for _, set := range sets {
  2748  		// Get a list of lnrpc features for each set we support.
  2749  		featureVector := r.server.featureMgr.Get(set)
  2750  		rpcFeatures := invoicesrpc.CreateRPCFeatures(featureVector)
  2751  
  2752  		// Add the features to our map of features, allowing overwriting of
  2753  		// existing values because features in different sets with the same bit
  2754  		// are duplicated across sets.
  2755  		for bit, feature := range rpcFeatures {
  2756  			features[bit] = feature
  2757  		}
  2758  	}
  2759  
  2760  	routerPruneTarget, routerPruneHeight := r.server.chanRouter.StartupPruneProgress()
  2761  
  2762  	// TODO(roasbeef): add synced height n stuff
  2763  	return &lnrpc.GetInfoResponse{
  2764  		IdentityPubkey:      encodedIDPub,
  2765  		NumPendingChannels:  nPendingChannels,
  2766  		NumActiveChannels:   activeChannels,
  2767  		NumInactiveChannels: inactiveChannels,
  2768  		NumPeers:            uint32(len(serverPeers)),
  2769  		BlockHeight:         uint32(bestHeight),
  2770  		BlockHash:           bestHash.String(),
  2771  		SyncedToChain:       isSynced,
  2772  		Testnet:             chainreg.IsTestnet(&r.cfg.ActiveNetParams),
  2773  		Chains:              activeChains,
  2774  		Uris:                uris,
  2775  		Alias:               nodeAnn.Alias.String(),
  2776  		Color:               routing.EncodeHexColor(nodeAnn.RGBColor),
  2777  		BestHeaderTimestamp: bestHeaderTimestamp,
  2778  		Version:             build.Version(),
  2779  		CommitHash:          build.Commit,
  2780  		SyncedToGraph:       isGraphSynced,
  2781  		Features:            features,
  2782  		ServerActive:        r.server.Started() && !r.server.Stopped(),
  2783  		RouterPruneTarget:   routerPruneTarget,
  2784  		RouterPruneHeight:   routerPruneHeight,
  2785  	}, nil
  2786  }
  2787  
  2788  // GetRecoveryInfo returns a boolean indicating whether the wallet is started
  2789  // in recovery mode, whether the recovery is finished, and the progress made
  2790  // so far.
  2791  func (r *rpcServer) GetRecoveryInfo(ctx context.Context,
  2792  	in *lnrpc.GetRecoveryInfoRequest) (*lnrpc.GetRecoveryInfoResponse, error) {
  2793  
  2794  	isRecoveryMode, progress, err := r.server.cc.Wallet.GetRecoveryInfo()
  2795  	if err != nil {
  2796  		return nil, fmt.Errorf("unable to get wallet recovery info: %v", err)
  2797  	}
  2798  
  2799  	rpcsLog.Debugf("[getrecoveryinfo] is recovery mode=%v, progress=%v",
  2800  		isRecoveryMode, progress)
  2801  
  2802  	return &lnrpc.GetRecoveryInfoResponse{
  2803  		RecoveryMode:     isRecoveryMode,
  2804  		RecoveryFinished: progress == 1,
  2805  		Progress:         progress,
  2806  	}, nil
  2807  }
  2808  
  2809  // ListPeers returns a verbose listing of all currently active peers.
  2810  func (r *rpcServer) ListPeers(ctx context.Context,
  2811  	in *lnrpc.ListPeersRequest) (*lnrpc.ListPeersResponse, error) {
  2812  
  2813  	rpcsLog.Tracef("[listpeers] request")
  2814  
  2815  	serverPeers := r.server.Peers()
  2816  	resp := &lnrpc.ListPeersResponse{
  2817  		Peers: make([]*lnrpc.Peer, 0, len(serverPeers)),
  2818  	}
  2819  
  2820  	for _, serverPeer := range serverPeers {
  2821  		var (
  2822  			atomsSent int64
  2823  			atomsRecv int64
  2824  		)
  2825  
  2826  		// In order to display the total number of outbound
  2827  		// (sent) and inbound (recv'd) atoms that have been
  2828  		// transported through this peer, we'll sum up the sent/recv'd
  2829  		// values for each of the active channels we have with the
  2830  		// peer.
  2831  		chans := serverPeer.ChannelSnapshots()
  2832  		for _, c := range chans {
  2833  			atomsSent += int64(c.TotalMAtomsSent.ToAtoms())
  2834  			atomsRecv += int64(c.TotalMAtomsReceived.ToAtoms())
  2835  		}
  2836  
  2837  		nodePub := serverPeer.PubKey()
  2838  
  2839  		// Retrieve the peer's sync type. If we don't currently have a
  2840  		// syncer for the peer, then we'll default to a passive sync.
  2841  		// This can happen if the RPC is called while a peer is
  2842  		// initializing.
  2843  		syncer, ok := r.server.authGossiper.SyncManager().GossipSyncer(
  2844  			nodePub,
  2845  		)
  2846  
  2847  		var lnrpcSyncType lnrpc.Peer_SyncType
  2848  		if !ok {
  2849  			rpcsLog.Warnf("Gossip syncer for peer=%x not found",
  2850  				nodePub)
  2851  			lnrpcSyncType = lnrpc.Peer_UNKNOWN_SYNC
  2852  		} else {
  2853  			syncType := syncer.SyncType()
  2854  			switch syncType {
  2855  			case discovery.ActiveSync:
  2856  				lnrpcSyncType = lnrpc.Peer_ACTIVE_SYNC
  2857  			case discovery.PassiveSync:
  2858  				lnrpcSyncType = lnrpc.Peer_PASSIVE_SYNC
  2859  			case discovery.PinnedSync:
  2860  				lnrpcSyncType = lnrpc.Peer_PINNED_SYNC
  2861  			default:
  2862  				return nil, fmt.Errorf("unhandled sync type %v",
  2863  					syncType)
  2864  			}
  2865  		}
  2866  
  2867  		features := invoicesrpc.CreateRPCFeatures(
  2868  			serverPeer.RemoteFeatures(),
  2869  		)
  2870  
  2871  		rpcPeer := &lnrpc.Peer{
  2872  			PubKey:          hex.EncodeToString(nodePub[:]),
  2873  			Address:         serverPeer.Conn().RemoteAddr().String(),
  2874  			Inbound:         serverPeer.Inbound(),
  2875  			BytesRecv:       serverPeer.BytesReceived(),
  2876  			BytesSent:       serverPeer.BytesSent(),
  2877  			AtomsSent:       atomsSent,
  2878  			AtomsRecv:       atomsRecv,
  2879  			PingTime:        serverPeer.PingTime(),
  2880  			SyncType:        lnrpcSyncType,
  2881  			Features:        features,
  2882  			LastPingPayload: serverPeer.LastRemotePingPayload(),
  2883  		}
  2884  
  2885  		var peerErrors []interface{}
  2886  
  2887  		// If we only want the most recent error, get the most recent
  2888  		// error from the buffer and add it to our list of errors if
  2889  		// it is non-nil. If we want all the stored errors, simply
  2890  		// add the full list to our set of errors.
  2891  		if in.LatestError {
  2892  			latestErr := serverPeer.ErrorBuffer().Latest()
  2893  			if latestErr != nil {
  2894  				peerErrors = []interface{}{latestErr}
  2895  			}
  2896  		} else {
  2897  			peerErrors = serverPeer.ErrorBuffer().List()
  2898  		}
  2899  
  2900  		// Add the relevant peer errors to our response.
  2901  		for _, error := range peerErrors {
  2902  			tsError := error.(*peer.TimestampedError)
  2903  
  2904  			rpcErr := &lnrpc.TimestampedError{
  2905  				Timestamp: uint64(tsError.Timestamp.Unix()),
  2906  				Error:     tsError.Error.Error(),
  2907  			}
  2908  
  2909  			rpcPeer.Errors = append(rpcPeer.Errors, rpcErr)
  2910  		}
  2911  
  2912  		// If the server has started, we can query the event store
  2913  		// for our peer's flap count. If we do so when the server has
  2914  		// not started, the request will block.
  2915  		if r.server.Started() {
  2916  			vertex, err := route.NewVertexFromBytes(nodePub[:])
  2917  			if err != nil {
  2918  				return nil, err
  2919  			}
  2920  
  2921  			flap, ts, err := r.server.chanEventStore.FlapCount(
  2922  				vertex,
  2923  			)
  2924  			if err != nil {
  2925  				return nil, err
  2926  			}
  2927  
  2928  			// If our timestamp is non-nil, we have values for our
  2929  			// peer's flap count, so we set them.
  2930  			if ts != nil {
  2931  				rpcPeer.FlapCount = int32(flap)
  2932  				rpcPeer.LastFlapNs = ts.UnixNano()
  2933  			}
  2934  		}
  2935  
  2936  		resp.Peers = append(resp.Peers, rpcPeer)
  2937  	}
  2938  
  2939  	rpcsLog.Debugf("[listpeers] yielded %v peers", serverPeers)
  2940  
  2941  	return resp, nil
  2942  }
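
        // A hypothetical request limiting each peer's error payload to the most
        // recent entry, as handled above:
        //
        //	resp, err := client.ListPeers(ctx, &lnrpc.ListPeersRequest{
        //		LatestError: true,
        //	})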
  2943  
  2944  // SubscribePeerEvents returns a uni-directional stream (server -> client)
  2945  // for notifying the client of peer online and offline events.
  2946  func (r *rpcServer) SubscribePeerEvents(req *lnrpc.PeerEventSubscription,
  2947  	eventStream lnrpc.Lightning_SubscribePeerEventsServer) error {
  2948  
  2949  	peerEventSub, err := r.server.peerNotifier.SubscribePeerEvents()
  2950  	if err != nil {
  2951  		return err
  2952  	}
  2953  	defer peerEventSub.Cancel()
  2954  
  2955  	for {
  2956  		select {
  2957  		// A new update has been sent by the peer notifier, we'll
  2958  		// marshal it into the form expected by the gRPC client, then
  2959  		// send it off to the client.
  2960  		case e := <-peerEventSub.Updates():
  2961  			var event *lnrpc.PeerEvent
  2962  
  2963  			switch peerEvent := e.(type) {
  2964  			case peernotifier.PeerOfflineEvent:
  2965  				event = &lnrpc.PeerEvent{
  2966  					PubKey: hex.EncodeToString(peerEvent.PubKey[:]),
  2967  					Type:   lnrpc.PeerEvent_PEER_OFFLINE,
  2968  				}
  2969  
  2970  			case peernotifier.PeerOnlineEvent:
  2971  				event = &lnrpc.PeerEvent{
  2972  					PubKey: hex.EncodeToString(peerEvent.PubKey[:]),
  2973  					Type:   lnrpc.PeerEvent_PEER_ONLINE,
  2974  				}
  2975  
  2976  			default:
  2977  				return fmt.Errorf("unexpected peer event: %v", event)
  2978  			}
  2979  
  2980  			if err := eventStream.Send(event); err != nil {
  2981  				return err
  2982  			}
  2983  
  2984  		// The response stream's context for whatever reason has been
  2985  		// closed. If the context was closed due to an exceeded deadline we will
  2986  		// return an error.
  2987  		case <-eventStream.Context().Done():
  2988  			if errors.Is(eventStream.Context().Err(), context.Canceled) {
  2989  				return nil
  2990  			}
  2991  			return eventStream.Context().Err()
  2992  
  2993  		case <-r.quit:
  2994  			return nil
  2995  		}
  2996  	}
  2997  }
  2998  
  2999  // WalletBalance returns the total unspent outputs (confirmed and unconfirmed),
  3000  // all confirmed unspent outputs and all unconfirmed unspent outputs under
  3001  // control of the wallet. This method can be modified by having the request specify
  3002  // only witness outputs should be factored into the final output sum.
  3003  // TODO(roasbeef): add async hooks into wallet balance changes
  3004  func (r *rpcServer) WalletBalance(ctx context.Context,
  3005  	in *lnrpc.WalletBalanceRequest) (*lnrpc.WalletBalanceResponse, error) {
  3006  
  3007  	// Retrieve all existing wallet accounts. We'll compute the confirmed
  3008  	// and unconfirmed balance for each and tally them up.
  3009  	accounts, err := r.server.cc.Wallet.ListAccounts("")
  3010  	if err != nil {
  3011  		return nil, err
  3012  	}
  3013  
  3014  	var totalBalance, confirmedBalance, unconfirmedBalance dcrutil.Amount
  3015  	rpcAccountBalances := make(
  3016  		map[string]*lnrpc.WalletAccountBalance, len(accounts),
  3017  	)
  3018  	for _, account := range accounts {
  3019  		switch account.AccountName {
  3020  		case "imported":
  3021  			// Omit the imported account from the response unless we
  3022  			// actually have any keys imported.
  3023  			if account.ImportedKeyCount == 0 {
  3024  				continue
  3025  			}
  3026  
  3027  			fallthrough
  3028  
  3029  		case lnwallet.DefaultAccountName:
  3030  			if _, ok := rpcAccountBalances[account.AccountName]; ok {
  3031  				continue
  3032  			}
  3033  
  3034  		default:
  3035  		}
  3036  
  3037  		// Get total balance, from txs that have >= 0 confirmations.
  3038  		totalBal, err := r.server.cc.Wallet.ConfirmedBalance(
  3039  			0, account.AccountName,
  3040  		)
  3041  		if err != nil {
  3042  			return nil, err
  3043  		}
  3044  		totalBalance += totalBal
  3045  
  3046  		// Get confirmed balance, from txs that have >= 1 confirmations.
  3047  		// TODO(halseth): get both unconfirmed and confirmed balance in
  3048  		// one call, as this is racy.
  3049  		confirmedBal, err := r.server.cc.Wallet.ConfirmedBalance(
  3050  			1, account.AccountName,
  3051  		)
  3052  		if err != nil {
  3053  			return nil, err
  3054  		}
  3055  		confirmedBalance += confirmedBal
  3056  
  3057  		// Get unconfirmed balance, from txs with 0 confirmations.
  3058  		unconfirmedBal := totalBal - confirmedBal
  3059  		unconfirmedBalance += unconfirmedBal
  3060  
  3061  		rpcAccountBalances[account.AccountName] = &lnrpc.WalletAccountBalance{
  3062  			ConfirmedBalance:   int64(confirmedBal),
  3063  			UnconfirmedBalance: int64(unconfirmedBal),
  3064  		}
  3065  	}
  3066  
  3067  	// Now that we have the base balance accounted for with each account,
  3068  	// we'll look at the set of locked UTXOs to tally that as well. If we
  3069  	// don't display this, then anytime we attempt a funding reservation,
  3070  	// the outputs will show as being "gone" until they're confirmed on
  3071  	// chain.
  3072  	//
  3073  	// Note(decred): this is commented out in dcrlnd because
  3074  	// ListLeasedOutputs is not implemented in dcrwallet.
  3075  	var lockedBalance dcrutil.Amount
  3076  	// leases, err := r.server.cc.Wallet.ListLeasedOutputs()
  3077  	leases, err := []*lnwallet.LockedOutput{}, error(nil)
  3078  	if err != nil {
  3079  		return nil, err
  3080  	}
  3081  	for _, leasedOutput := range leases {
  3082  		utxoInfo, err := r.server.cc.Wallet.FetchInputInfo(
  3083  			&leasedOutput.Outpoint,
  3084  		)
  3085  		if err != nil {
  3086  			return nil, err
  3087  		}
  3088  
  3089  		lockedBalance += utxoInfo.Value
  3090  	}
  3091  
  3092  	rpcsLog.Debugf("[walletbalance] Total balance=%v (confirmed=%v, "+
  3093  		"unconfirmed=%v)", totalBalance, confirmedBalance,
  3094  		unconfirmedBalance)
  3095  
  3096  	return &lnrpc.WalletBalanceResponse{
  3097  		TotalBalance:       int64(totalBalance),
  3098  		ConfirmedBalance:   int64(confirmedBalance),
  3099  		UnconfirmedBalance: int64(unconfirmedBalance),
  3100  		LockedBalance:      int64(lockedBalance),
  3101  		AccountBalance:     rpcAccountBalances,
  3102  	}, nil
  3103  }
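
// exampleWalletBalanceUsage is a hypothetical sketch (not part of the original
// file) showing how the response assembled above might be consumed over gRPC.
// It assumes an already-dialed lnrpc.LightningClient named client. All
// balances are denominated in atoms, and TotalBalance is the sum of the
// confirmed and unconfirmed balances.
func exampleWalletBalanceUsage(ctx context.Context, client lnrpc.LightningClient) error {
	resp, err := client.WalletBalance(ctx, &lnrpc.WalletBalanceRequest{})
	if err != nil {
		return err
	}

	// Per-account balances mirror the aggregate fields.
	for account, balance := range resp.AccountBalance {
		rpcsLog.Infof("account=%s confirmed=%d unconfirmed=%d", account,
			balance.ConfirmedBalance, balance.UnconfirmedBalance)
	}

	rpcsLog.Infof("total=%d confirmed=%d unconfirmed=%d locked=%d",
		resp.TotalBalance, resp.ConfirmedBalance,
		resp.UnconfirmedBalance, resp.LockedBalance)

	return nil
}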
  3104  
  3105  // ChannelBalance returns the total available channel flow across all open
  3106  // channels in atoms.
  3107  func (r *rpcServer) ChannelBalance(ctx context.Context,
  3108  	in *lnrpc.ChannelBalanceRequest) (
  3109  	*lnrpc.ChannelBalanceResponse, error) {
  3110  
  3111  	var (
  3112  		localBalance             lnwire.MilliAtom
  3113  		remoteBalance            lnwire.MilliAtom
  3114  		unsettledLocalBalance    lnwire.MilliAtom
  3115  		unsettledRemoteBalance   lnwire.MilliAtom
  3116  		pendingOpenLocalBalance  lnwire.MilliAtom
  3117  		pendingOpenRemoteBalance lnwire.MilliAtom
  3118  		maxInbound               dcrutil.Amount
  3119  		maxOutbound              dcrutil.Amount
  3120  	)
  3121  
  3122  	openChannels, err := r.server.chanStateDB.FetchAllOpenChannels()
  3123  	if err != nil {
  3124  		return nil, err
  3125  	}
  3126  
  3127  	for _, channel := range openChannels {
  3128  		c := channel.LocalCommitment
  3129  		localBalance += c.LocalBalance
  3130  		remoteBalance += c.RemoteBalance
  3131  
  3132  		// Add pending htlc amount.
  3133  		for _, htlc := range c.Htlcs {
  3134  			if htlc.Incoming {
  3135  				unsettledLocalBalance += htlc.Amt
  3136  			} else {
  3137  				unsettledRemoteBalance += htlc.Amt
  3138  			}
  3139  		}
  3140  
  3141  		local := channel.LocalCommitment.LocalBalance.ToAtoms()
  3142  		localReserve := channel.LocalChanCfg.ChannelConstraints.ChanReserve
  3143  		remote := channel.RemoteCommitment.RemoteBalance.ToAtoms()
  3144  		remoteReserve := channel.RemoteChanCfg.ChannelConstraints.ChanReserve
  3145  
  3146  		// The maximum amount we can receive from this channel is however much
  3147  		// the remote node has, minus its required channel reserve.
  3148  		if remote > remoteReserve {
  3149  			maxInbound += remote - remoteReserve
  3150  		}
  3151  
  3152  		// The maximum amount we can send across this channel is however much
  3153  		// the local node has, minus what the remote node requires us to
  3154  		// reserve.
  3155  		if local > localReserve {
  3156  			maxOutbound += local - localReserve
  3157  		}
  3158  	}
  3159  
  3160  	pendingChannels, err := r.server.chanStateDB.FetchPendingChannels()
  3161  	if err != nil {
  3162  		return nil, err
  3163  	}
  3164  
  3165  	for _, channel := range pendingChannels {
  3166  		c := channel.LocalCommitment
  3167  		pendingOpenLocalBalance += c.LocalBalance
  3168  		pendingOpenRemoteBalance += c.RemoteBalance
  3169  	}
  3170  
  3171  	rpcsLog.Debugf("[channelbalance] local_balance=%v remote_balance=%v "+
  3172  		"unsettled_local_balance=%v unsettled_remote_balance=%v "+
  3173  		"pending_open_local_balance=%v pending_open_remote_balance=%v",
  3174  		localBalance, remoteBalance, unsettledLocalBalance,
  3175  		unsettledRemoteBalance, pendingOpenLocalBalance,
  3176  		pendingOpenRemoteBalance)
  3177  
  3178  	return &lnrpc.ChannelBalanceResponse{
  3179  		LocalBalance: &lnrpc.Amount{
  3180  			Atoms:  uint64(localBalance.ToAtoms()),
  3181  			Matoms: uint64(localBalance),
  3182  		},
  3183  		RemoteBalance: &lnrpc.Amount{
  3184  			Atoms:  uint64(remoteBalance.ToAtoms()),
  3185  			Matoms: uint64(remoteBalance),
  3186  		},
  3187  		UnsettledLocalBalance: &lnrpc.Amount{
  3188  			Atoms:  uint64(unsettledLocalBalance.ToAtoms()),
  3189  			Matoms: uint64(unsettledLocalBalance),
  3190  		},
  3191  		UnsettledRemoteBalance: &lnrpc.Amount{
  3192  			Atoms:  uint64(unsettledRemoteBalance.ToAtoms()),
  3193  			Matoms: uint64(unsettledRemoteBalance),
  3194  		},
  3195  		PendingOpenLocalBalance: &lnrpc.Amount{
  3196  			Atoms:  uint64(pendingOpenLocalBalance.ToAtoms()),
  3197  			Matoms: uint64(pendingOpenLocalBalance),
  3198  		},
  3199  		PendingOpenRemoteBalance: &lnrpc.Amount{
  3200  			Atoms:  uint64(pendingOpenRemoteBalance.ToAtoms()),
  3201  			Matoms: uint64(pendingOpenRemoteBalance),
  3202  		},
  3203  
  3204  		// dcrlnd fields.
  3205  		MaxInboundAmount:  int64(maxInbound),
  3206  		MaxOutboundAmount: int64(maxOutbound),
  3207  
  3208  		// Deprecated fields.
  3209  		Balance:            int64(localBalance.ToAtoms()),
  3210  		PendingOpenBalance: int64(pendingOpenLocalBalance.ToAtoms()),
  3211  	}, nil
  3212  }
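
// exampleChannelHeadroom is a hypothetical sketch (not part of the original
// file) restating the per-channel reserve arithmetic behind the
// MaxInboundAmount/MaxOutboundAmount fields above: the remote party's balance
// above its required reserve is receivable by us, and our balance above our
// own reserve is spendable by us.
func exampleChannelHeadroom(local, localReserve, remote,
	remoteReserve dcrutil.Amount) (maxInbound, maxOutbound dcrutil.Amount) {

	if remote > remoteReserve {
		maxInbound = remote - remoteReserve
	}
	if local > localReserve {
		maxOutbound = local - localReserve
	}
	return maxInbound, maxOutbound
}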
  3213  
  3214  type (
  3215  	pendingOpenChannels  []*lnrpc.PendingChannelsResponse_PendingOpenChannel
  3216  	pendingForceClose    []*lnrpc.PendingChannelsResponse_ForceClosedChannel
  3217  	waitingCloseChannels []*lnrpc.PendingChannelsResponse_WaitingCloseChannel
  3218  )
  3219  
  3220  // fetchPendingOpenChannels queries the database for a list of channels that
  3221  // have pending open state. The returned result is used in the response of the
  3222  // PendingChannels RPC.
  3223  func (r *rpcServer) fetchPendingOpenChannels() (pendingOpenChannels, error) {
  3224  	// First, we'll populate the response with all the channels that are
  3225  	// soon to be opened. We can easily fetch this data from the database
  3226  	// and map the db struct to the proto response.
  3227  	channels, err := r.server.chanStateDB.FetchPendingChannels()
  3228  	if err != nil {
  3229  		rpcsLog.Errorf("unable to fetch pending channels: %v", err)
  3230  		return nil, err
  3231  	}
  3232  
  3233  	result := make(pendingOpenChannels, len(channels))
  3234  	for i, pendingChan := range channels {
  3235  		pub := pendingChan.IdentityPub.SerializeCompressed()
  3236  
  3237  		// As this is required for display purposes, we'll calculate
  3238  		// the size of the commitment transaction. We also add on the
  3239  		// estimated size of the signature script to calculate the size of the
  3240  		// transaction if it were to be immediately unilaterally
  3241  		// broadcast.
  3242  		// TODO(roasbeef): query for funding tx from wallet, display
  3243  		// that also?
  3244  		localCommitment := pendingChan.LocalCommitment
  3245  		utx := localCommitment.CommitTx
  3246  		commitBaseSize := int64(utx.SerializeSize())
  3247  		commitSize := commitBaseSize + 1 + input.FundingOutputSigScriptSize
  3248  
  3249  		result[i] = &lnrpc.PendingChannelsResponse_PendingOpenChannel{
  3250  			Channel: &lnrpc.PendingChannelsResponse_PendingChannel{
  3251  				RemoteNodePub:          hex.EncodeToString(pub),
  3252  				ChannelPoint:           pendingChan.FundingOutpoint.String(),
  3253  				Capacity:               int64(pendingChan.Capacity),
  3254  				LocalBalance:           int64(localCommitment.LocalBalance.ToAtoms()),
  3255  				RemoteBalance:          int64(localCommitment.RemoteBalance.ToAtoms()),
  3256  				LocalChanReserveAtoms:  int64(pendingChan.LocalChanCfg.ChanReserve),
  3257  				RemoteChanReserveAtoms: int64(pendingChan.RemoteChanCfg.ChanReserve),
  3258  				Initiator:              rpcInitiator(pendingChan.IsInitiator),
  3259  				CommitmentType:         rpcCommitmentType(pendingChan.ChanType),
  3260  				ShortChanId:            pendingChan.ShortChannelID.String(),
  3261  			},
  3262  			CommitSize: commitSize,
  3263  			CommitFee:  int64(localCommitment.CommitFee),
  3264  			FeePerKb:   int64(localCommitment.FeePerKB),
  3265  			// TODO(roasbeef): need to track confirmation height
  3266  		}
  3267  	}
  3268  
  3269  	return result, nil
  3270  }
  3271  
  3272  // fetchPendingForceCloseChannels queries the database for a list of channels
  3273  // that have their closing transactions confirmed but not fully resolved yet.
  3274  // The returned result is used in the response of the PendingChannels RPC.
  3275  func (r *rpcServer) fetchPendingForceCloseChannels() (pendingForceClose,
  3276  	int64, error) {
  3277  
  3278  	_, currentHeight, err := r.server.cc.ChainIO.GetBestBlock()
  3279  	if err != nil {
  3280  		return nil, 0, err
  3281  	}
  3282  
  3283  	// Next, we'll examine the channels that are soon to be closed so we
  3284  	// can populate these fields within the response.
  3285  	channels, err := r.server.chanStateDB.FetchClosedChannels(true)
  3286  	if err != nil {
  3287  		rpcsLog.Errorf("unable to fetch closed channels: %v", err)
  3288  		return nil, 0, err
  3289  	}
  3290  
  3291  	result := make(pendingForceClose, 0)
  3292  	limboBalance := int64(0)
  3293  
  3294  	for _, pendingClose := range channels {
  3295  		// First construct the channel struct itself, this will be
  3296  		// needed regardless of how this channel was closed.
  3297  		pub := pendingClose.RemotePub.SerializeCompressed()
  3298  		chanPoint := pendingClose.ChanPoint
  3299  
  3300  		// Create the pending channel. If this channel was closed before
  3301  		// we started storing historical channel data, we will not know
  3302  		// who initiated the channel, so we set the initiator field to
  3303  		// unknown.
  3304  		channel := &lnrpc.PendingChannelsResponse_PendingChannel{
  3305  			RemoteNodePub:  hex.EncodeToString(pub),
  3306  			ChannelPoint:   chanPoint.String(),
  3307  			Capacity:       int64(pendingClose.Capacity),
  3308  			LocalBalance:   int64(pendingClose.SettledBalance),
  3309  			CommitmentType: lnrpc.CommitmentType_UNKNOWN_COMMITMENT_TYPE,
  3310  			Initiator:      lnrpc.Initiator_INITIATOR_UNKNOWN,
  3311  			ShortChanId:    pendingClose.ShortChanID.String(),
  3312  		}
  3313  
  3314  		// Lookup the channel in the historical channel bucket to obtain
  3315  		// initiator information. If the historical channel bucket was
  3316  		// not found, or the channel itself, this channel was closed
  3317  		// in a version before we started persisting historical
  3318  		// channels, so we silence the error.
  3319  		historical, err := r.server.chanStateDB.FetchHistoricalChannel(
  3320  			&pendingClose.ChanPoint,
  3321  		)
  3322  		switch err {
  3323  		// If the channel was closed in a version that did not record
  3324  		// historical channels, ignore the error.
  3325  		case channeldb.ErrNoHistoricalBucket:
  3326  		case channeldb.ErrChannelNotFound:
  3327  
  3328  		case nil:
  3329  			channel.Initiator = rpcInitiator(historical.IsInitiator)
  3330  			channel.CommitmentType = rpcCommitmentType(
  3331  				historical.ChanType,
  3332  			)
  3333  
  3334  			// Get the number of forwarding packages from the
  3335  			// historical channel.
  3336  			fwdPkgs, err := historical.LoadFwdPkgs()
  3337  			if err != nil {
  3338  				rpcsLog.Errorf("unable to load forwarding "+
  3339  					"packages for channel:%s, %v",
  3340  					historical.ShortChannelID, err)
  3341  				return nil, 0, err
  3342  			}
  3343  			channel.NumForwardingPackages = int64(len(fwdPkgs))
  3344  
  3345  		// If the error is non-nil, and not due to older versions of lnd
  3346  		// not persisting historical channels, return it.
  3347  		default:
  3348  			return nil, 0, err
  3349  		}
  3350  
  3351  		closeTXID := pendingClose.ClosingTXID.String()
  3352  
  3353  		switch pendingClose.CloseType {
  3354  
  3355  		// A coop closed channel should never be in the "pending close"
  3356  		// state. If a node upgraded from an older lnd version in the
  3357  		// middle of its channel close confirming, it will be in this
  3358  		// state. We log a warning that the channel will not be included
  3359  		// in the now deprecated pending close channels field.
  3360  		case channeldb.CooperativeClose:
  3361  			rpcsLog.Warnf("channel %v cooperatively closed and "+
  3362  				"in pending close state",
  3363  				pendingClose.ChanPoint)
  3364  
  3365  		// If the channel was force closed, then we'll need to query
  3366  		// the contractcourt.UtxoNursery for additional information.
  3367  		// TODO(halseth): distinguish remote and local case?
  3368  		case channeldb.LocalForceClose, channeldb.RemoteForceClose:
  3369  			forceClose := &lnrpc.PendingChannelsResponse_ForceClosedChannel{
  3370  				Channel:     channel,
  3371  				ClosingTxid: closeTXID,
  3372  			}
  3373  
  3374  			// Fetch reports from both nursery and resolvers. At the
  3375  			// moment this is not an atomic snapshot. This is
  3376  			// planned to be resolved when the nursery is removed
  3377  			// and channel arbitrator will be the single source for
  3378  			// these kind of reports.
  3379  			err := r.nurseryPopulateForceCloseResp(
  3380  				&chanPoint, currentHeight, forceClose,
  3381  			)
  3382  			if err != nil {
  3383  				return nil, 0, err
  3384  			}
  3385  
  3386  			err = r.arbitratorPopulateForceCloseResp(
  3387  				&chanPoint, currentHeight, forceClose,
  3388  			)
  3389  			if err != nil {
  3390  				return nil, 0, err
  3391  			}
  3392  
  3393  			limboBalance += forceClose.LimboBalance
  3394  			result = append(result, forceClose)
  3395  		}
  3396  	}
  3397  
  3398  	return result, limboBalance, nil
  3399  }
  3400  
  3401  // fetchWaitingCloseChannels queries the database for a list of channels
  3402  // that have their closing transactions broadcast but not confirmed yet.
  3403  // The returned result is used in the response of the PendingChannels RPC.
  3404  func (r *rpcServer) fetchWaitingCloseChannels() (waitingCloseChannels,
  3405  	int64, error) {
  3406  
  3407  	// We'll also fetch all channels that are open, but have had their
  3408  	// commitment broadcasted, meaning they are waiting for the closing
  3409  	// transaction to confirm.
  3410  	channels, err := r.server.chanStateDB.FetchWaitingCloseChannels()
  3411  	if err != nil {
  3412  		rpcsLog.Errorf("unable to fetch channels waiting close: %v",
  3413  			err)
  3414  		return nil, 0, err
  3415  	}
  3416  
  3417  	result := make(waitingCloseChannels, 0)
  3418  	limboBalance := int64(0)
  3419  
  3420  	// getClosingTx is a helper closure that tries to find the closing txid
  3421  	// of a given waiting close channel. Notice that if the remote closes
  3422  	// the channel, we may not have the closing txid.
  3423  	getClosingTx := func(c *channeldb.OpenChannel) (string, error) {
  3424  		var (
  3425  			tx  *wire.MsgTx
  3426  			err error
  3427  		)
  3428  
  3429  		// First, we try to locate the force closing txid. If not
  3430  		// found, we will then try to find its coop closing txid.
  3431  		tx, err = c.BroadcastedCommitment()
  3432  		if err == nil {
  3433  			return tx.TxHash().String(), nil
  3434  		}
  3435  
  3436  		// If the error returned is not ErrNoCloseTx, something
  3437  		// unexpected happened and we will return the error.
  3438  		if err != channeldb.ErrNoCloseTx {
  3439  			return "", err
  3440  		}
  3441  
  3442  		// Otherwise, we continue to locate its coop closing txid.
  3443  		tx, err = c.BroadcastedCooperative()
  3444  		if err == nil {
  3445  			return tx.TxHash().String(), nil
  3446  		}
  3447  
  3448  		// Return the error if it's not ErrNoCloseTx.
  3449  		if err != channeldb.ErrNoCloseTx {
  3450  			return "", err
  3451  		}
  3452  
  3453  		// Otherwise return an empty txid. This can happen if the
  3454  		// remote broadcast the closing txid and we haven't recorded it
  3455  		// yet.
  3456  		return "", nil
  3457  	}
  3458  
  3459  	for _, waitingClose := range channels {
  3460  		pub := waitingClose.IdentityPub.SerializeCompressed()
  3461  		chanPoint := waitingClose.FundingOutpoint
  3462  
  3463  		var commitments lnrpc.PendingChannelsResponse_Commitments
  3464  
  3465  		// Report local commit. May not be present when DLP is active.
  3466  		if waitingClose.LocalCommitment.CommitTx != nil {
  3467  			commitments.LocalTxid =
  3468  				waitingClose.LocalCommitment.CommitTx.TxHash().
  3469  					String()
  3470  
  3471  			commitments.LocalCommitFeeAtoms = uint64(
  3472  				waitingClose.LocalCommitment.CommitFee,
  3473  			)
  3474  		}
  3475  
  3476  		// Report remote commit. May not be present when DLP is active.
  3477  		if waitingClose.RemoteCommitment.CommitTx != nil {
  3478  			commitments.RemoteTxid =
  3479  				waitingClose.RemoteCommitment.CommitTx.TxHash().
  3480  					String()
  3481  
  3482  			commitments.RemoteCommitFeeAtoms = uint64(
  3483  				waitingClose.RemoteCommitment.CommitFee,
  3484  			)
  3485  		}
  3486  
  3487  		// Report the remote pending commit if any.
  3488  		remoteCommitDiff, err := waitingClose.RemoteCommitChainTip()
  3489  
  3490  		switch {
  3491  
  3492  		// Don't set hash if there is no pending remote commit.
  3493  		case err == channeldb.ErrNoPendingCommit:
  3494  
  3495  		// An unexpected error occurred.
  3496  		case err != nil:
  3497  			return nil, 0, err
  3498  
  3499  		// There is a pending remote commit. Set its hash in the
  3500  		// response.
  3501  		default:
  3502  			hash := remoteCommitDiff.Commitment.CommitTx.TxHash()
  3503  			commitments.RemotePendingTxid = hash.String()
  3504  			commitments.RemoteCommitFeeAtoms = uint64(
  3505  				remoteCommitDiff.Commitment.CommitFee,
  3506  			)
  3507  		}
  3508  
  3509  		fwdPkgs, err := waitingClose.LoadFwdPkgs()
  3510  		if err != nil {
  3511  			rpcsLog.Errorf("unable to load forwarding packages "+
  3512  				"for channel:%s, %v",
  3513  				waitingClose.ShortChannelID, err)
  3514  			return nil, 0, err
  3515  		}
  3516  
  3517  		// Get the closing txid.
  3518  		// NOTE: the closing txid could be empty here if it was the
  3519  		// remote party that broadcasted the closing tx.
  3520  		closingTxid, err := getClosingTx(waitingClose)
  3521  		if err != nil {
  3522  			rpcsLog.Errorf("unable to find closing txid for "+
  3523  				"channel:%s, %v",
  3524  				waitingClose.ShortChannelID, err)
  3525  			return nil, 0, err
  3526  		}
  3527  
  3528  		channel := &lnrpc.PendingChannelsResponse_PendingChannel{
  3529  			RemoteNodePub:          hex.EncodeToString(pub),
  3530  			ChannelPoint:           chanPoint.String(),
  3531  			Capacity:               int64(waitingClose.Capacity),
  3532  			LocalBalance:           int64(waitingClose.LocalCommitment.LocalBalance.ToAtoms()),
  3533  			RemoteBalance:          int64(waitingClose.LocalCommitment.RemoteBalance.ToAtoms()),
  3534  			LocalChanReserveAtoms:  int64(waitingClose.LocalChanCfg.ChanReserve),
  3535  			RemoteChanReserveAtoms: int64(waitingClose.RemoteChanCfg.ChanReserve),
  3536  			Initiator:              rpcInitiator(waitingClose.IsInitiator),
  3537  			CommitmentType:         rpcCommitmentType(waitingClose.ChanType),
  3538  			ShortChanId:            waitingClose.ShortChannelID.String(),
  3539  			NumForwardingPackages:  int64(len(fwdPkgs)),
  3540  			ChanStatusFlags:        waitingClose.ChanStatus().String(),
  3541  		}
  3542  
  3543  		waitingCloseResp := &lnrpc.PendingChannelsResponse_WaitingCloseChannel{
  3544  			Channel:      channel,
  3545  			LimboBalance: channel.LocalBalance,
  3546  			Commitments:  &commitments,
  3547  			ClosingTxid:  closingTxid,
  3548  		}
  3549  
  3550  		// A close tx has been broadcasted, all our balance will be in
  3551  		// limbo until it confirms.
  3552  		result = append(result, waitingCloseResp)
  3553  		limboBalance += channel.LocalBalance
  3554  	}
  3555  
  3556  	return result, limboBalance, nil
  3557  }
  3558  
  3559  // PendingChannels returns a list of all the channels that are currently
  3560  // considered "pending". A channel is pending if it has finished the funding
  3561  // workflow and is waiting for confirmations for the funding txn, or is in the
  3562  // process of closure, either initiated cooperatively or non-cooperatively.
  3563  func (r *rpcServer) PendingChannels(ctx context.Context,
  3564  	in *lnrpc.PendingChannelsRequest) (
  3565  	*lnrpc.PendingChannelsResponse, error) {
  3566  
  3567  	rpcsLog.Debugf("[pendingchannels]")
  3568  
  3569  	resp := &lnrpc.PendingChannelsResponse{}
  3570  
  3571  	// First, we find all the channels that will soon be opened.
  3572  	pendingOpenChannels, err := r.fetchPendingOpenChannels()
  3573  	if err != nil {
  3574  		return nil, err
  3575  	}
  3576  	resp.PendingOpenChannels = pendingOpenChannels
  3577  
  3578  	// Second, we fetch all channels that are considered pending force closing.
  3579  	// This means the channels here have their closing transactions
  3580  	// confirmed but not considered fully resolved yet. For instance, they
  3581  	// may have second-level HTLCs to be resolved onchain.
  3582  	pendingCloseChannels, limbo, err := r.fetchPendingForceCloseChannels()
  3583  	if err != nil {
  3584  		return nil, err
  3585  	}
  3586  	resp.PendingForceClosingChannels = pendingCloseChannels
  3587  	resp.TotalLimboBalance = limbo
  3588  
  3589  	// Third, we fetch all channels that are open, but have had their
  3590  	// commitment broadcasted, meaning they are waiting for the closing
  3591  	// transaction to confirm.
  3592  	waitingCloseChannels, limbo, err := r.fetchWaitingCloseChannels()
  3593  	if err != nil {
  3594  		return nil, err
  3595  	}
  3596  	resp.WaitingCloseChannels = waitingCloseChannels
  3597  	resp.TotalLimboBalance += limbo
  3598  
  3599  	return resp, nil
  3600  }
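
// examplePendingChannelsUsage is a hypothetical sketch (not part of the
// original file) of reading the PendingChannels response over gRPC. It assumes
// an already-dialed lnrpc.LightningClient named client. TotalLimboBalance is
// the sum of the limbo balances of the force-closing and waiting-close
// channels computed above.
func examplePendingChannelsUsage(ctx context.Context, client lnrpc.LightningClient) error {
	resp, err := client.PendingChannels(ctx, &lnrpc.PendingChannelsRequest{})
	if err != nil {
		return err
	}

	rpcsLog.Infof("pending open=%d force close=%d waiting close=%d limbo=%d",
		len(resp.PendingOpenChannels), len(resp.PendingForceClosingChannels),
		len(resp.WaitingCloseChannels), resp.TotalLimboBalance)

	return nil
}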
  3601  
  3602  // arbitratorPopulateForceCloseResp populates the pending channels response
  3603  // message with channel resolution information from the contract resolvers.
  3604  func (r *rpcServer) arbitratorPopulateForceCloseResp(chanPoint *wire.OutPoint,
  3605  	currentHeight int32,
  3606  	forceClose *lnrpc.PendingChannelsResponse_ForceClosedChannel) error {
  3607  
  3608  	// Query for contract resolvers state.
  3609  	arbitrator, err := r.server.chainArb.GetChannelArbitrator(*chanPoint)
  3610  	if err != nil {
  3611  		return err
  3612  	}
  3613  	reports := arbitrator.Report()
  3614  
  3615  	for _, report := range reports {
  3616  		switch report.Type {
  3617  
  3618  		// For a direct output, populate/update the top level
  3619  		// response properties.
  3620  		case contractcourt.ReportOutputUnencumbered:
  3621  			// Populate the maturity height fields for the direct
  3622  			// commitment output to us.
  3623  			forceClose.MaturityHeight = report.MaturityHeight
  3624  
  3625  			// If the transaction has been confirmed, then we can
  3626  			// compute how many blocks it has left.
  3627  			if forceClose.MaturityHeight != 0 {
  3628  				forceClose.BlocksTilMaturity =
  3629  					int32(forceClose.MaturityHeight) -
  3630  						currentHeight
  3631  			}
  3632  
  3633  		// Add htlcs to the PendingHtlcs response property.
  3634  		case contractcourt.ReportOutputIncomingHtlc,
  3635  			contractcourt.ReportOutputOutgoingHtlc:
  3636  
  3637  			// Don't report details on htlcs that are no longer in
  3638  			// limbo.
  3639  			if report.LimboBalance == 0 {
  3640  				break
  3641  			}
  3642  
  3643  			incoming := report.Type == contractcourt.ReportOutputIncomingHtlc
  3644  			htlc := &lnrpc.PendingHTLC{
  3645  				Incoming:       incoming,
  3646  				Amount:         int64(report.Amount),
  3647  				Outpoint:       report.Outpoint.String(),
  3648  				MaturityHeight: report.MaturityHeight,
  3649  				Stage:          report.Stage,
  3650  			}
  3651  
  3652  			if htlc.MaturityHeight != 0 {
  3653  				htlc.BlocksTilMaturity =
  3654  					int32(htlc.MaturityHeight) - currentHeight
  3655  			}
  3656  
  3657  			forceClose.PendingHtlcs = append(forceClose.PendingHtlcs, htlc)
  3658  
  3659  		case contractcourt.ReportOutputAnchor:
  3660  			// There are three resolution states for the anchor:
  3661  			// limbo, lost and recovered. Derive the current state
  3662  			// from the limbo and recovered balances.
  3663  			switch {
  3664  
  3665  			case report.RecoveredBalance != 0:
  3666  				forceClose.Anchor = lnrpc.PendingChannelsResponse_ForceClosedChannel_RECOVERED
  3667  
  3668  			case report.LimboBalance != 0:
  3669  				forceClose.Anchor = lnrpc.PendingChannelsResponse_ForceClosedChannel_LIMBO
  3670  
  3671  			default:
  3672  				forceClose.Anchor = lnrpc.PendingChannelsResponse_ForceClosedChannel_LOST
  3673  			}
  3674  
  3675  		default:
  3676  			return fmt.Errorf("unknown report output type: %v",
  3677  				report.Type)
  3678  		}
  3679  
  3680  		forceClose.LimboBalance += int64(report.LimboBalance)
  3681  		forceClose.RecoveredBalance += int64(report.RecoveredBalance)
  3682  	}
  3683  
  3684  	return nil
  3685  }
  3686  
  3687  // nurseryPopulateForceCloseResp populates the pending channels response
  3688  // message with contract resolution information from utxonursery.
  3689  func (r *rpcServer) nurseryPopulateForceCloseResp(chanPoint *wire.OutPoint,
  3690  	currentHeight int32,
  3691  	forceClose *lnrpc.PendingChannelsResponse_ForceClosedChannel) error {
  3692  
  3693  	// Query for the maturity state for this force closed channel. If we
  3694  	// didn't have any time-locked outputs, then the nursery may not know of
  3695  	// the contract.
  3696  	nurseryInfo, err := r.server.utxoNursery.NurseryReport(chanPoint)
  3697  	if err == contractcourt.ErrContractNotFound {
  3698  		return nil
  3699  	}
  3700  	if err != nil {
  3701  		return fmt.Errorf("unable to obtain "+
  3702  			"nursery report for ChannelPoint(%v): %v",
  3703  			chanPoint, err)
  3704  	}
  3705  
  3706  	// If the nursery knows of this channel, then we can populate
  3707  	// information detailing exactly how much funds are time locked and also
  3708  	// the height at which we can ultimately sweep the funds into the
  3709  	// wallet.
  3710  	forceClose.LimboBalance = int64(nurseryInfo.LimboBalance)
  3711  	forceClose.RecoveredBalance = int64(nurseryInfo.RecoveredBalance)
  3712  
  3713  	for _, htlcReport := range nurseryInfo.Htlcs {
  3714  		// TODO(conner) set incoming flag appropriately after handling
  3715  		// incoming incubation
  3716  		htlc := &lnrpc.PendingHTLC{
  3717  			Incoming:       false,
  3718  			Amount:         int64(htlcReport.Amount),
  3719  			Outpoint:       htlcReport.Outpoint.String(),
  3720  			MaturityHeight: htlcReport.MaturityHeight,
  3721  			Stage:          htlcReport.Stage,
  3722  		}
  3723  
  3724  		if htlc.MaturityHeight != 0 {
  3725  			htlc.BlocksTilMaturity =
  3726  				int32(htlc.MaturityHeight) -
  3727  					currentHeight
  3728  		}
  3729  
  3730  		forceClose.PendingHtlcs = append(forceClose.PendingHtlcs,
  3731  			htlc)
  3732  	}
  3733  
  3734  	return nil
  3735  }
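
// exampleBlocksTilMaturity is a hypothetical sketch (not part of the original
// file) of the maturity arithmetic used above: once a time-locked output's
// maturity height is known, the blocks remaining are simply the maturity
// height minus the current best height (non-positive once already mature).
func exampleBlocksTilMaturity(maturityHeight uint32, currentHeight int32) int32 {
	if maturityHeight == 0 {
		// Maturity not yet known (e.g. the closing tx has not confirmed).
		return 0
	}
	return int32(maturityHeight) - currentHeight
}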
  3736  
  3737  // ClosedChannels returns a list of all the channels that have been closed.
  3738  // This does not include channels that are still in the process of closing.
  3739  func (r *rpcServer) ClosedChannels(ctx context.Context,
  3740  	in *lnrpc.ClosedChannelsRequest) (*lnrpc.ClosedChannelsResponse,
  3741  	error) {
  3742  
  3743  	// Show all channels when no filter flags are set.
  3744  	filterResults := in.Cooperative || in.LocalForce ||
  3745  		in.RemoteForce || in.Breach || in.FundingCanceled ||
  3746  		in.Abandoned
  3747  
  3748  	resp := &lnrpc.ClosedChannelsResponse{}
  3749  
  3750  	dbChannels, err := r.server.chanStateDB.FetchClosedChannels(false)
  3751  	if err != nil {
  3752  		return nil, err
  3753  	}
  3754  
  3755  	// In order to make the response easier to parse for clients, we'll
  3756  	// sort the set of closed channels by their closing height before
  3757  	// serializing the proto response.
  3758  	sort.Slice(dbChannels, func(i, j int) bool {
  3759  		return dbChannels[i].CloseHeight < dbChannels[j].CloseHeight
  3760  	})
  3761  
  3762  	for _, dbChannel := range dbChannels {
  3763  		if dbChannel.IsPending {
  3764  			continue
  3765  		}
  3766  
  3767  		switch dbChannel.CloseType {
  3768  		case channeldb.CooperativeClose:
  3769  			if filterResults && !in.Cooperative {
  3770  				continue
  3771  			}
  3772  		case channeldb.LocalForceClose:
  3773  			if filterResults && !in.LocalForce {
  3774  				continue
  3775  			}
  3776  		case channeldb.RemoteForceClose:
  3777  			if filterResults && !in.RemoteForce {
  3778  				continue
  3779  			}
  3780  		case channeldb.BreachClose:
  3781  			if filterResults && !in.Breach {
  3782  				continue
  3783  			}
  3784  		case channeldb.FundingCanceled:
  3785  			if filterResults && !in.FundingCanceled {
  3786  				continue
  3787  			}
  3788  		case channeldb.Abandoned:
  3789  			if filterResults && !in.Abandoned {
  3790  				continue
  3791  			}
  3792  		}
  3793  
  3794  		channel, err := r.createRPCClosedChannel(dbChannel)
  3795  		if err != nil {
  3796  			return nil, err
  3797  		}
  3798  
  3799  		resp.Channels = append(resp.Channels, channel)
  3800  	}
  3801  
  3802  	return resp, nil
  3803  }
  3804  
  3805  // ListChannels returns a description of all the open channels that this node
  3806  // is a participant in.
  3807  func (r *rpcServer) ListChannels(ctx context.Context,
  3808  	in *lnrpc.ListChannelsRequest) (*lnrpc.ListChannelsResponse, error) {
  3809  
  3810  	if in.ActiveOnly && in.InactiveOnly {
  3811  		return nil, fmt.Errorf("either `active_only` or " +
  3812  			"`inactive_only` can be set, but not both")
  3813  	}
  3814  
  3815  	if in.PublicOnly && in.PrivateOnly {
  3816  		return nil, fmt.Errorf("either `public_only` or " +
  3817  			"`private_only` can be set, but not both")
  3818  	}
  3819  
  3820  	if len(in.Peer) > 0 && len(in.Peer) != 33 {
  3821  		_, err := route.NewVertexFromBytes(in.Peer)
  3822  		return nil, fmt.Errorf("invalid `peer` key: %v", err)
  3823  	}
  3824  
  3825  	resp := &lnrpc.ListChannelsResponse{}
  3826  
  3827  	graph := r.server.graphDB
  3828  
  3829  	dbChannels, err := r.server.chanStateDB.FetchAllOpenChannels()
  3830  	if err != nil {
  3831  		return nil, err
  3832  	}
  3833  
  3834  	rpcsLog.Debugf("[listchannels] fetched %v channels from DB",
  3835  		len(dbChannels))
  3836  
  3837  	for _, dbChannel := range dbChannels {
  3838  		nodePub := dbChannel.IdentityPub
  3839  		nodePubBytes := nodePub.SerializeCompressed()
  3840  		chanPoint := dbChannel.FundingOutpoint
  3841  
  3842  		// If the caller requested channels for a target node, skip any
  3843  		// that don't match the provided pubkey.
  3844  		if len(in.Peer) > 0 && !bytes.Equal(nodePubBytes, in.Peer) {
  3845  			continue
  3846  		}
  3847  
  3848  		var peerOnline bool
  3849  		if _, err := r.server.FindPeer(nodePub); err == nil {
  3850  			peerOnline = true
  3851  		}
  3852  
  3853  		channelID := lnwire.NewChanIDFromOutPoint(&chanPoint)
  3854  		var linkActive bool
  3855  		if link, err := r.server.htlcSwitch.GetLink(channelID); err == nil {
  3856  			// A channel is only considered active if it is known
  3857  			// by the switch *and* able to forward
  3858  			// incoming/outgoing payments.
  3859  			linkActive = link.EligibleToForward()
  3860  		}
  3861  
  3862  		// Next, we'll determine whether we should add this channel to
  3863  		// our list depending on the type of channels requested to us.
  3864  		isActive := peerOnline && linkActive
  3865  		channel, err := createRPCOpenChannel(r, graph, dbChannel, isActive)
  3866  		if err != nil {
  3867  			return nil, err
  3868  		}
  3869  
  3870  		// We'll only skip returning this channel if we were requested
  3871  		// for a specific kind and this channel doesn't satisfy it.
  3872  		switch {
  3873  		case in.ActiveOnly && !isActive:
  3874  			continue
  3875  		case in.InactiveOnly && isActive:
  3876  			continue
  3877  		case in.PublicOnly && channel.Private:
  3878  			continue
  3879  		case in.PrivateOnly && !channel.Private:
  3880  			continue
  3881  		}
  3882  
  3883  		resp.Channels = append(resp.Channels, channel)
  3884  	}
  3885  
  3886  	return resp, nil
  3887  }
  3888  
  3889  // rpcCommitmentType takes the channel type and converts it to an rpc commitment
  3890  // type value.
  3891  func rpcCommitmentType(chanType channeldb.ChannelType) lnrpc.CommitmentType {
  3892  	// Extract the commitment type from the channel type flags. We must
  3893  	// check the most specific flags first: a leased channel also has
  3894  	// anchors, and an anchor channel is also tweakless.
  3895  	if chanType.HasLeaseExpiration() {
  3896  		return lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE
  3897  	}
  3898  
  3899  	if chanType.HasAnchors() {
  3900  		return lnrpc.CommitmentType_ANCHORS
  3901  	}
  3902  
  3903  	if chanType.IsTweakless() {
  3904  		return lnrpc.CommitmentType_STATIC_REMOTE_KEY
  3905  	}
  3906  
  3907  	return lnrpc.CommitmentType_LEGACY
  3908  }
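
// exampleCommitmentTypePrecedence is a hypothetical sketch (not part of the
// original file) illustrating the precedence encoded in rpcCommitmentType
// above: lease expiration implies anchors, anchors imply tweakless, and a
// channel type with no flags set maps to the legacy commitment.
func exampleCommitmentTypePrecedence() {
	// A zero ChannelType has none of the feature bits set, so it falls
	// through to the legacy commitment type.
	legacy := rpcCommitmentType(channeldb.ChannelType(0))
	rpcsLog.Debugf("zero channel type maps to %v", legacy) // LEGACY
}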
  3909  
  3910  // createChannelConstraint creates a *lnrpc.ChannelConstraints using the
  3911  // *channeldb.ChannelConfig.
  3912  func createChannelConstraint(
  3913  	chanCfg *channeldb.ChannelConfig) *lnrpc.ChannelConstraints {
  3914  
  3915  	return &lnrpc.ChannelConstraints{
  3916  		CsvDelay:            uint32(chanCfg.CsvDelay),
  3917  		ChanReserveAtoms:    uint64(chanCfg.ChanReserve),
  3918  		DustLimitAtoms:      uint64(chanCfg.DustLimit),
  3919  		MaxPendingAmtMAtoms: uint64(chanCfg.MaxPendingAmount),
  3920  		MinHtlcMAtoms:       uint64(chanCfg.MinHTLC),
  3921  		MaxAcceptedHtlcs:    uint32(chanCfg.MaxAcceptedHtlcs),
  3922  	}
  3923  }
  3924  
  3925  // createRPCOpenChannel creates an *lnrpc.Channel from the *channeldb.Channel.
  3926  func createRPCOpenChannel(r *rpcServer, graph *channeldb.ChannelGraph,
  3927  	dbChannel *channeldb.OpenChannel, isActive bool) (*lnrpc.Channel, error) {
  3928  
  3929  	nodePub := dbChannel.IdentityPub
  3930  	nodeID := hex.EncodeToString(nodePub.SerializeCompressed())
  3931  	chanPoint := dbChannel.FundingOutpoint
  3932  
  3933  	// Next, we'll determine whether the channel is public or not.
  3934  	isPublic := dbChannel.ChannelFlags&lnwire.FFAnnounceChannel != 0
  3935  
  3936  	// As this is required for display purposes, we'll calculate
  3937  	// the size of the commitment transaction. We also add on the
  3938  	// estimated size of the signature script to calculate the size of the
  3939  	// transaction if it were to be immediately unilaterally
  3940  	// broadcast.
  3941  	localCommit := dbChannel.LocalCommitment
  3942  	utx := localCommit.CommitTx
  3943  	commitBaseSize := int64(utx.SerializeSize())
  3944  	commitSize := commitBaseSize + 1 + input.FundingOutputSigScriptSize
  3945  
  3946  	localBalance := localCommit.LocalBalance
  3947  	remoteBalance := localCommit.RemoteBalance
  3948  
  3949  	// As an artifact of our usage of milli-atoms internally, either party
  3950  	// may end up in a state where they're holding a fractional
  3951  	// amount of atoms which can't be expressed within the
  3952  	// actual commitment output. Since we round down when going
  3953  	// from milli-atoms -> Atoms, we may at any point be adding an
  3954  	// additional atom to miners fees. As a result, we display a
  3955  	// commitment fee that accounts for this externally.
  3956  	var sumOutputs dcrutil.Amount
  3957  	for _, txOut := range localCommit.CommitTx.TxOut {
  3958  		sumOutputs += dcrutil.Amount(txOut.Value)
  3959  	}
  3960  	externalCommitFee := dbChannel.Capacity - sumOutputs
  3961  
  3962  	// Extract the commitment type from the channel type flags.
  3963  	commitmentType := rpcCommitmentType(dbChannel.ChanType)
  3964  
  3965  	channel := &lnrpc.Channel{
  3966  		Active:             isActive,
  3967  		Private:            !isPublic,
  3968  		RemotePubkey:       nodeID,
  3969  		ChannelPoint:       chanPoint.String(),
  3970  		ChanId:             dbChannel.ShortChannelID.ToUint64(),
  3971  		Capacity:           int64(dbChannel.Capacity),
  3972  		LocalBalance:       int64(localBalance.ToAtoms()),
  3973  		RemoteBalance:      int64(remoteBalance.ToAtoms()),
  3974  		CommitFee:          int64(externalCommitFee),
  3975  		CommitSize:         commitSize,
  3976  		FeePerKb:           int64(localCommit.FeePerKB),
  3977  		TotalAtomsSent:     int64(dbChannel.TotalMAtomsSent.ToAtoms()),
  3978  		TotalAtomsReceived: int64(dbChannel.TotalMAtomsReceived.ToAtoms()),
  3979  		NumUpdates:         localCommit.CommitHeight,
  3980  		PendingHtlcs:       make([]*lnrpc.HTLC, len(localCommit.Htlcs)),
  3981  		Initiator:          dbChannel.IsInitiator,
  3982  		ChanStatusFlags:    dbChannel.ChanStatus().String(),
  3983  		StaticRemoteKey:    commitmentType == lnrpc.CommitmentType_STATIC_REMOTE_KEY,
  3984  		CommitmentType:     commitmentType,
  3985  		ThawHeight:         dbChannel.ThawHeight,
  3986  		LocalConstraints: createChannelConstraint(
  3987  			&dbChannel.LocalChanCfg,
  3988  		),
  3989  		RemoteConstraints: createChannelConstraint(
  3990  			&dbChannel.RemoteChanCfg,
  3991  		),
  3992  		// TODO: remove the following deprecated fields
  3993  		CsvDelay:               uint32(dbChannel.LocalChanCfg.CsvDelay),
  3994  		LocalChanReserveAtoms:  int64(dbChannel.LocalChanCfg.ChanReserve),
  3995  		RemoteChanReserveAtoms: int64(dbChannel.RemoteChanCfg.ChanReserve),
  3996  	}
  3997  
  3998  	for i, htlc := range localCommit.Htlcs {
  3999  		var rHash [32]byte
  4000  		copy(rHash[:], htlc.RHash[:])
  4001  
  4002  		circuitMap := r.server.htlcSwitch.CircuitLookup()
  4003  
  4004  		var forwardingChannel, forwardingHtlcIndex uint64
  4005  		switch {
  4006  		case htlc.Incoming:
  4007  			circuit := circuitMap.LookupCircuit(
  4008  				htlcswitch.CircuitKey{
  4009  					ChanID: dbChannel.ShortChannelID,
  4010  					HtlcID: htlc.HtlcIndex,
  4011  				},
  4012  			)
  4013  			if circuit != nil && circuit.Outgoing != nil {
  4014  				forwardingChannel = circuit.Outgoing.ChanID.
  4015  					ToUint64()
  4016  
  4017  				forwardingHtlcIndex = circuit.Outgoing.HtlcID
  4018  			}
  4019  
  4020  		case !htlc.Incoming:
  4021  			circuit := circuitMap.LookupOpenCircuit(
  4022  				htlcswitch.CircuitKey{
  4023  					ChanID: dbChannel.ShortChannelID,
  4024  					HtlcID: htlc.HtlcIndex,
  4025  				},
  4026  			)
  4027  
  4028  			// If the incoming channel id is the special hop.Source
  4029  			// value, the htlc index is a local payment identifier.
  4030  			// In this case, report nothing.
  4031  			if circuit != nil &&
  4032  				circuit.Incoming.ChanID != hop.Source {
  4033  
  4034  				forwardingChannel = circuit.Incoming.ChanID.
  4035  					ToUint64()
  4036  
  4037  				forwardingHtlcIndex = circuit.Incoming.HtlcID
  4038  			}
  4039  		}
  4040  
  4041  		channel.PendingHtlcs[i] = &lnrpc.HTLC{
  4042  			Incoming:            htlc.Incoming,
  4043  			Amount:              int64(htlc.Amt.ToAtoms()),
  4044  			HashLock:            rHash[:],
  4045  			ExpirationHeight:    htlc.RefundTimeout,
  4046  			HtlcIndex:           htlc.HtlcIndex,
  4047  			ForwardingChannel:   forwardingChannel,
  4048  			ForwardingHtlcIndex: forwardingHtlcIndex,
  4049  		}
  4050  		channel.UnsettledBalance += channel.PendingHtlcs[i].Amount
  4051  	}
  4052  
  4053  	// Lookup our balances at height 0, because they will reflect any
  4054  	// push amounts that may have been present when this channel was
  4055  	// created.
  4056  	localBalance, remoteBalance, err := dbChannel.BalancesAtHeight(0)
  4057  	if err != nil {
  4058  		return nil, err
  4059  	}
  4060  
  4061  	// If we initiated opening the channel, the zero height remote balance
  4062  	// is the push amount. Otherwise, our starting balance is the push
  4063  	// amount. If there is no push amount, these values will simply be zero.
  4064  	if dbChannel.IsInitiator {
  4065  		channel.PushAmountAtoms = uint64(remoteBalance.ToAtoms())
  4066  	} else {
  4067  		channel.PushAmountAtoms = uint64(localBalance.ToAtoms())
  4068  	}
  4069  
  4070  	if len(dbChannel.LocalShutdownScript) > 0 {
  4071  		// TODO(decred): Store version along with LocalShutdownScript?
  4072  		scriptVersion := uint16(0)
  4073  		_, addresses := stdscript.ExtractAddrs(
  4074  			scriptVersion, dbChannel.LocalShutdownScript,
  4075  			r.cfg.ActiveNetParams.Params,
  4076  		)
  4077  
  4078  		// We only expect one upfront shutdown address for a channel. If
  4079  		// LocalShutdownScript is non-zero, there should be one payout
  4080  		// address set.
  4081  		if len(addresses) != 1 {
  4082  			return nil, fmt.Errorf("expected one upfront shutdown "+
  4083  				"address, got: %v", len(addresses))
  4084  		}
  4085  
  4086  		channel.CloseAddress = addresses[0].String()
  4087  	}
  4088  
  4089  	// Additional decred-specific data for channels.
  4090  	waitTime, err := dbChannel.Db.GetChanReestablishWaitTime(dbChannel.ShortChannelID)
  4091  	if err != nil {
  4092  		return nil, err
  4093  	}
  4094  	channel.ChanReestablishWaitTimeMs = waitTime.Milliseconds()
  4095  	channel.ShortChanId = dbChannel.ShortChannelID.String()
  4096  
  4097  	// If the server hasn't fully started yet, it's possible that the
  4098  	// channel event store hasn't either, so it won't be able to consume any
  4099  	// requests until then. To prevent blocking, we'll just omit the uptime
  4100  	// related fields for now.
  4101  	if !r.server.Started() {
  4102  		return channel, nil
  4103  	}
  4104  
  4105  	peer, err := route.NewVertexFromBytes(nodePub.SerializeCompressed())
  4106  	if err != nil {
  4107  		return nil, err
  4108  	}
  4109  
  4110  	// Query the event store for additional information about the channel.
  4111  	// Do not fail if it is not available, because there is a potential
  4112  	// race between a channel being added to our node and the event store
  4113  	// being notified of it.
  4114  	outpoint := dbChannel.FundingOutpoint
  4115  	info, err := r.server.chanEventStore.GetChanInfo(outpoint, peer)
  4116  	switch err {
  4117  	// If the store does not know about the channel, we just log it.
  4118  	case chanfitness.ErrChannelNotFound:
  4119  		rpcsLog.Infof("channel: %v not found by channel event store",
  4120  			outpoint)
  4121  
  4122  	// If we got our channel info, we further populate the channel.
  4123  	case nil:
  4124  		channel.Uptime = int64(info.Uptime.Seconds())
  4125  		channel.Lifetime = int64(info.Lifetime.Seconds())
  4126  
  4127  	// If we get an unexpected error, we return it.
  4128  	default:
  4129  		return nil, err
  4130  	}
  4131  
  4132  	return channel, nil
  4133  }
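
// exampleExternalCommitFee is a hypothetical sketch (not part of the original
// file) restating how createRPCOpenChannel above derives the displayed commit
// fee: because milli-atom balances are rounded down to whole atoms in the
// commitment outputs, the fee is reported externally as the channel capacity
// minus the sum of the commitment transaction's outputs.
func exampleExternalCommitFee(capacity dcrutil.Amount, commitTx *wire.MsgTx) dcrutil.Amount {
	var sumOutputs dcrutil.Amount
	for _, txOut := range commitTx.TxOut {
		sumOutputs += dcrutil.Amount(txOut.Value)
	}
	return capacity - sumOutputs
}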
  4134  
  4135  // createRPCClosedChannel creates an *lnrpc.ClosedChannelSummary from a
  4136  // *channeldb.ChannelCloseSummary.
  4137  func (r *rpcServer) createRPCClosedChannel(
  4138  	dbChannel *channeldb.ChannelCloseSummary) (*lnrpc.ChannelCloseSummary, error) {
  4139  
  4140  	nodePub := dbChannel.RemotePub
  4141  	nodeID := hex.EncodeToString(nodePub.SerializeCompressed())
  4142  
  4143  	var (
  4144  		closeType      lnrpc.ChannelCloseSummary_ClosureType
  4145  		openInit       lnrpc.Initiator
  4146  		closeInitiator lnrpc.Initiator
  4147  		err            error
  4148  	)
  4149  
  4150  	// Lookup local and remote cooperative initiators. If these values
  4151  	// are not known they will just return unknown.
  4152  	openInit, closeInitiator, err = r.getInitiators(
  4153  		&dbChannel.ChanPoint,
  4154  	)
  4155  	if err != nil {
  4156  		return nil, err
  4157  	}
  4158  
  4159  	// Convert the close type to rpc type.
  4160  	switch dbChannel.CloseType {
  4161  	case channeldb.CooperativeClose:
  4162  		closeType = lnrpc.ChannelCloseSummary_COOPERATIVE_CLOSE
  4163  	case channeldb.LocalForceClose:
  4164  		closeType = lnrpc.ChannelCloseSummary_LOCAL_FORCE_CLOSE
  4165  	case channeldb.RemoteForceClose:
  4166  		closeType = lnrpc.ChannelCloseSummary_REMOTE_FORCE_CLOSE
  4167  	case channeldb.BreachClose:
  4168  		closeType = lnrpc.ChannelCloseSummary_BREACH_CLOSE
  4169  	case channeldb.FundingCanceled:
  4170  		closeType = lnrpc.ChannelCloseSummary_FUNDING_CANCELED
  4171  	case channeldb.Abandoned:
  4172  		closeType = lnrpc.ChannelCloseSummary_ABANDONED
  4173  	}
  4174  
  4175  	channel := &lnrpc.ChannelCloseSummary{
  4176  		Capacity:          int64(dbChannel.Capacity),
  4177  		RemotePubkey:      nodeID,
  4178  		ShortChanId:       dbChannel.ShortChanID.String(),
  4179  		CloseHeight:       dbChannel.CloseHeight,
  4180  		CloseType:         closeType,
  4181  		ChannelPoint:      dbChannel.ChanPoint.String(),
  4182  		ChanId:            dbChannel.ShortChanID.ToUint64(),
  4183  		SettledBalance:    int64(dbChannel.SettledBalance),
  4184  		TimeLockedBalance: int64(dbChannel.TimeLockedBalance),
  4185  		ChainHash:         dbChannel.ChainHash.String(),
  4186  		ClosingTxHash:     dbChannel.ClosingTXID.String(),
  4187  		OpenInitiator:     openInit,
  4188  		CloseInitiator:    closeInitiator,
  4189  	}
  4190  
  4191  	reports, err := r.server.miscDB.FetchChannelReports(
  4192  		r.cfg.ActiveNetParams.GenesisHash, &dbChannel.ChanPoint,
  4193  	)
  4194  	switch err {
  4195  	// If the channel does not have its resolver outcomes stored,
  4196  	// ignore it.
  4197  	case channeldb.ErrNoChainHashBucket:
  4198  		fallthrough
  4199  	case channeldb.ErrNoChannelSummaries:
  4200  		return channel, nil
  4201  
  4202  	// If there is no error, fallthrough the switch to process reports.
  4203  	case nil:
  4204  
  4205  	// If another error occurred, return it.
  4206  	default:
  4207  		return nil, err
  4208  	}
  4209  
  4210  	for _, report := range reports {
  4211  		rpcResolution, err := rpcChannelResolution(report)
  4212  		if err != nil {
  4213  			return nil, err
  4214  		}
  4215  
  4216  		channel.Resolutions = append(channel.Resolutions, rpcResolution)
  4217  	}
  4218  
  4219  	return channel, nil
  4220  }
  4221  
  4222  func rpcChannelResolution(report *channeldb.ResolverReport) (*lnrpc.Resolution,
  4223  	error) {
  4224  
  4225  	res := &lnrpc.Resolution{
  4226  		AmountAtoms: uint64(report.Amount),
  4227  		Outpoint: &lnrpc.OutPoint{
  4228  			OutputIndex: report.OutPoint.Index,
  4229  			TxidStr:     report.OutPoint.Hash.String(),
  4230  			TxidBytes:   report.OutPoint.Hash[:],
  4231  		},
  4232  	}
  4233  
  4234  	if report.SpendTxID != nil {
  4235  		res.SweepTxid = report.SpendTxID.String()
  4236  	}
  4237  
  4238  	switch report.ResolverType {
  4239  	case channeldb.ResolverTypeAnchor:
  4240  		res.ResolutionType = lnrpc.ResolutionType_ANCHOR
  4241  
  4242  	case channeldb.ResolverTypeIncomingHtlc:
  4243  		res.ResolutionType = lnrpc.ResolutionType_INCOMING_HTLC
  4244  
  4245  	case channeldb.ResolverTypeOutgoingHtlc:
  4246  		res.ResolutionType = lnrpc.ResolutionType_OUTGOING_HTLC
  4247  
  4248  	case channeldb.ResolverTypeCommit:
  4249  		res.ResolutionType = lnrpc.ResolutionType_COMMIT
  4250  
  4251  	default:
  4252  		return nil, fmt.Errorf("unknown resolver type: %v",
  4253  			report.ResolverType)
  4254  	}
  4255  
  4256  	switch report.ResolverOutcome {
  4257  	case channeldb.ResolverOutcomeClaimed:
  4258  		res.Outcome = lnrpc.ResolutionOutcome_CLAIMED
  4259  
  4260  	case channeldb.ResolverOutcomeUnclaimed:
  4261  		res.Outcome = lnrpc.ResolutionOutcome_UNCLAIMED
  4262  
  4263  	case channeldb.ResolverOutcomeAbandoned:
  4264  		res.Outcome = lnrpc.ResolutionOutcome_ABANDONED
  4265  
  4266  	case channeldb.ResolverOutcomeFirstStage:
  4267  		res.Outcome = lnrpc.ResolutionOutcome_FIRST_STAGE
  4268  
  4269  	case channeldb.ResolverOutcomeTimeout:
  4270  		res.Outcome = lnrpc.ResolutionOutcome_TIMEOUT
  4271  
  4272  	default:
  4273  		return nil, fmt.Errorf("unknown outcome: %v",
  4274  			report.ResolverOutcome)
  4275  	}
  4276  
  4277  	return res, nil
  4278  }
  4279  
  4280  // getInitiators returns an initiator enum that provides information about the
  4281  // party that initiated channel's open and close. This information is obtained
  4282  // from the historical channel bucket, so unknown values are returned when the
  4283  // channel is not present (which indicates that it was closed before we started
  4284  // writing channels to the historical close bucket).
  4285  func (r *rpcServer) getInitiators(chanPoint *wire.OutPoint) (
  4286  	lnrpc.Initiator,
  4287  	lnrpc.Initiator, error) {
  4288  
  4289  	var (
  4290  		openInitiator  = lnrpc.Initiator_INITIATOR_UNKNOWN
  4291  		closeInitiator = lnrpc.Initiator_INITIATOR_UNKNOWN
  4292  	)
  4293  
  4294  	// To get the close initiator for cooperative closes, we need
  4295  	// to get the channel status from the historical channel bucket.
  4296  	histChan, err := r.server.chanStateDB.FetchHistoricalChannel(chanPoint)
  4297  	switch {
  4298  	// The node has upgraded from a version where we did not store
  4299  	// historical channels, and has not closed a channel since. Do
  4300  	// not return an error, initiator values are unknown.
  4301  	case err == channeldb.ErrNoHistoricalBucket:
  4302  		return openInitiator, closeInitiator, nil
  4303  
  4304  	// The channel was closed before we started storing historical
  4305  	// channels. Do not return an error, initiator values are unknown.
  4306  	case err == channeldb.ErrChannelNotFound:
  4307  		return openInitiator, closeInitiator, nil
  4308  
  4309  	case err != nil:
  4310  		return 0, 0, err
  4311  	}
  4312  
  4313  	// If we successfully looked up the channel, determine initiator based
  4314  	// on channels status.
  4315  	if histChan.IsInitiator {
  4316  		openInitiator = lnrpc.Initiator_INITIATOR_LOCAL
  4317  	} else {
  4318  		openInitiator = lnrpc.Initiator_INITIATOR_REMOTE
  4319  	}
  4320  
  4321  	localInit := histChan.HasChanStatus(
  4322  		channeldb.ChanStatusLocalCloseInitiator,
  4323  	)
  4324  
  4325  	remoteInit := histChan.HasChanStatus(
  4326  		channeldb.ChanStatusRemoteCloseInitiator,
  4327  	)
  4328  
  4329  	switch {
  4330  	// There is a possible case where closes were attempted by both parties.
  4331  	// We return the initiator as both in this case to provide full
  4332  	// information about the close.
  4333  	case localInit && remoteInit:
  4334  		closeInitiator = lnrpc.Initiator_INITIATOR_BOTH
  4335  
  4336  	case localInit:
  4337  		closeInitiator = lnrpc.Initiator_INITIATOR_LOCAL
  4338  
  4339  	case remoteInit:
  4340  		closeInitiator = lnrpc.Initiator_INITIATOR_REMOTE
  4341  	}
  4342  
  4343  	return openInitiator, closeInitiator, nil
  4344  }
  4345  
  4346  // SubscribeChannelEvents returns a uni-directional stream (server -> client)
  4347  // for notifying the client of newly active, inactive or closed channels.
  4348  func (r *rpcServer) SubscribeChannelEvents(req *lnrpc.ChannelEventSubscription,
  4349  	updateStream lnrpc.Lightning_SubscribeChannelEventsServer) error {
  4350  
  4351  	channelEventSub, err := r.server.channelNotifier.SubscribeChannelEvents()
  4352  	if err != nil {
  4353  		return err
  4354  	}
  4355  
  4356  	// Ensure that the resources for the client are cleaned up once either
  4357  	// the server or the client exits.
  4358  	defer channelEventSub.Cancel()
  4359  
  4360  	graph := r.server.graphDB
  4361  
  4362  	for {
  4363  		select {
  4364  		// A new update has been sent by the channel router, we'll
  4365  		// marshal it into the form expected by the gRPC client, then
  4366  		// send it off to the client(s).
  4367  		case e := <-channelEventSub.Updates():
  4368  			var update *lnrpc.ChannelEventUpdate
  4369  			switch event := e.(type) {
  4370  			case channelnotifier.PendingOpenChannelEvent:
  4371  				update = &lnrpc.ChannelEventUpdate{
  4372  					Type: lnrpc.ChannelEventUpdate_PENDING_OPEN_CHANNEL,
  4373  					Channel: &lnrpc.ChannelEventUpdate_PendingOpenChannel{
  4374  						PendingOpenChannel: &lnrpc.PendingUpdate{
  4375  							Txid:        event.ChannelPoint.Hash[:],
  4376  							OutputIndex: event.ChannelPoint.Index,
  4377  						},
  4378  					},
  4379  				}
  4380  			case channelnotifier.OpenChannelEvent:
  4381  				channel, err := createRPCOpenChannel(r, graph,
  4382  					event.Channel, true)
  4383  				if err != nil {
  4384  					return err
  4385  				}
  4386  
  4387  				update = &lnrpc.ChannelEventUpdate{
  4388  					Type: lnrpc.ChannelEventUpdate_OPEN_CHANNEL,
  4389  					Channel: &lnrpc.ChannelEventUpdate_OpenChannel{
  4390  						OpenChannel: channel,
  4391  					},
  4392  				}
  4393  
  4394  			case channelnotifier.ClosedChannelEvent:
  4395  				closedChannel, err := r.createRPCClosedChannel(
  4396  					event.CloseSummary,
  4397  				)
  4398  				if err != nil {
  4399  					return err
  4400  				}
  4401  
  4402  				update = &lnrpc.ChannelEventUpdate{
  4403  					Type: lnrpc.ChannelEventUpdate_CLOSED_CHANNEL,
  4404  					Channel: &lnrpc.ChannelEventUpdate_ClosedChannel{
  4405  						ClosedChannel: closedChannel,
  4406  					},
  4407  				}
  4408  
  4409  			case channelnotifier.ActiveChannelEvent:
  4410  				update = &lnrpc.ChannelEventUpdate{
  4411  					Type: lnrpc.ChannelEventUpdate_ACTIVE_CHANNEL,
  4412  					Channel: &lnrpc.ChannelEventUpdate_ActiveChannel{
  4413  						ActiveChannel: &lnrpc.ChannelPoint{
  4414  							FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
  4415  								FundingTxidBytes: event.ChannelPoint.Hash[:],
  4416  							},
  4417  							OutputIndex: event.ChannelPoint.Index,
  4418  						},
  4419  					},
  4420  				}
  4421  
  4422  			case channelnotifier.InactiveChannelEvent:
  4423  				update = &lnrpc.ChannelEventUpdate{
  4424  					Type: lnrpc.ChannelEventUpdate_INACTIVE_CHANNEL,
  4425  					Channel: &lnrpc.ChannelEventUpdate_InactiveChannel{
  4426  						InactiveChannel: &lnrpc.ChannelPoint{
  4427  							FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
  4428  								FundingTxidBytes: event.ChannelPoint.Hash[:],
  4429  							},
  4430  							OutputIndex: event.ChannelPoint.Index,
  4431  						},
  4432  					},
  4433  				}
  4434  
  4435  			// Completely ignore ActiveLinkEvent as this is explicitly not
  4436  			// exposed to the RPC.
  4437  			case channelnotifier.ActiveLinkEvent:
  4438  				continue
  4439  
  4440  			case channelnotifier.FullyResolvedChannelEvent:
  4441  				update = &lnrpc.ChannelEventUpdate{
  4442  					Type: lnrpc.ChannelEventUpdate_FULLY_RESOLVED_CHANNEL,
  4443  					Channel: &lnrpc.ChannelEventUpdate_FullyResolvedChannel{
  4444  						FullyResolvedChannel: &lnrpc.ChannelPoint{
  4445  							FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
  4446  								FundingTxidBytes: event.ChannelPoint.Hash[:],
  4447  							},
  4448  							OutputIndex: event.ChannelPoint.Index,
  4449  						},
  4450  					},
  4451  				}
  4452  
  4453  			default:
  4454  				return fmt.Errorf("unexpected channel event update: %v", event)
  4455  			}
  4456  
  4457  			if err := updateStream.Send(update); err != nil {
  4458  				return err
  4459  			}
  4460  
  4461  		// The response stream's context for whatever reason has been
  4462  		// closed. If context is closed by an exceeded deadline we will
  4463  		// return an error.
  4464  		case <-updateStream.Context().Done():
  4465  			if errors.Is(updateStream.Context().Err(), context.Canceled) {
  4466  				return nil
  4467  			}
  4468  			return updateStream.Context().Err()
  4469  
  4470  		case <-r.quit:
  4471  			return nil
  4472  		}
  4473  	}
  4474  }
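
// A minimal client-side sketch of consuming this stream through the generated
// lnrpc client (assuming an established grpc.ClientConn named conn and a
// context ctx; variable names are hypothetical):
//
//	client := lnrpc.NewLightningClient(conn)
//	events, err := client.SubscribeChannelEvents(
//		ctx, &lnrpc.ChannelEventSubscription{},
//	)
//	if err != nil {
//		return err
//	}
//	for {
//		update, err := events.Recv()
//		if err != nil {
//			return err
//		}
//		switch update.Type {
//		case lnrpc.ChannelEventUpdate_OPEN_CHANNEL:
//			fmt.Println("channel opened")
//		case lnrpc.ChannelEventUpdate_CLOSED_CHANNEL:
//			fmt.Println("channel closed")
//		}
//	}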
  4475  
  4476  // paymentStream enables different types of payment streams, such as
  4477  // lnrpc.Lightning_SendPaymentServer and lnrpc.Lightning_SendToRouteServer, to
  4478  // execute sendPayment. We use this struct as a sort of bridge to enable code
  4479  // re-use between SendPayment and SendToRoute.
  4480  type paymentStream struct {
  4481  	recv func() (*rpcPaymentRequest, error)
  4482  	send func(*lnrpc.SendResponse) error
  4483  }
  4484  
  4485  // rpcPaymentRequest wraps lnrpc.SendRequest so that routes from
  4486  // lnrpc.SendToRouteRequest can be passed to sendPayment.
  4487  type rpcPaymentRequest struct {
  4488  	*lnrpc.SendRequest
  4489  	route *route.Route
  4490  }
  4491  
  4492  // SendPayment dispatches a bi-directional streaming RPC for sending payments
  4493  // through the Lightning Network. A single RPC invocation creates a persistent
  4494  // bi-directional stream allowing clients to rapidly send payments through the
  4495  // Lightning Network with a single persistent connection.
  4496  func (r *rpcServer) SendPayment(stream lnrpc.Lightning_SendPaymentServer) error {
  4497  	var lock sync.Mutex
  4498  
  4499  	return r.sendPayment(&paymentStream{
  4500  		recv: func() (*rpcPaymentRequest, error) {
  4501  			req, err := stream.Recv()
  4502  			if err != nil {
  4503  				return nil, err
  4504  			}
  4505  
  4506  			return &rpcPaymentRequest{
  4507  				SendRequest: req,
  4508  			}, nil
  4509  		},
  4510  		send: func(r *lnrpc.SendResponse) error {
  4511  			// Calling stream.Send concurrently is not safe.
  4512  			lock.Lock()
  4513  			defer lock.Unlock()
  4514  			return stream.Send(r)
  4515  		},
  4516  	})
  4517  }
  4518  
  4519  // SendToRoute dispatches a bi-directional streaming RPC for sending payments
  4520  // through the Lightning Network via predefined routes passed in. A single RPC
  4521  // invocation creates a persistent bi-directional stream allowing clients to
  4522  // rapidly send payments through the Lightning Network with a single persistent
  4523  // connection.
  4524  func (r *rpcServer) SendToRoute(stream lnrpc.Lightning_SendToRouteServer) error {
  4525  	var lock sync.Mutex
  4526  
  4527  	return r.sendPayment(&paymentStream{
  4528  		recv: func() (*rpcPaymentRequest, error) {
  4529  			req, err := stream.Recv()
  4530  			if err != nil {
  4531  				return nil, err
  4532  			}
  4533  
  4534  			return r.unmarshallSendToRouteRequest(req)
  4535  		},
  4536  		send: func(r *lnrpc.SendResponse) error {
  4537  			// Calling stream.Send concurrently is not safe.
  4538  			lock.Lock()
  4539  			defer lock.Unlock()
  4540  			return stream.Send(r)
  4541  		},
  4542  	})
  4543  }
  4544  
  4545  // unmarshallSendToRouteRequest unmarshalls an RPC SendToRoute request.
  4546  func (r *rpcServer) unmarshallSendToRouteRequest(
  4547  	req *lnrpc.SendToRouteRequest) (*rpcPaymentRequest, error) {
  4548  
  4549  	if req.Route == nil {
  4550  		return nil, fmt.Errorf("unable to send, no route provided")
  4551  	}
  4552  
  4553  	route, err := r.routerBackend.UnmarshallRoute(req.Route)
  4554  	if err != nil {
  4555  		return nil, err
  4556  	}
  4557  
  4558  	return &rpcPaymentRequest{
  4559  		SendRequest: &lnrpc.SendRequest{
  4560  			PaymentHash:       req.PaymentHash,
  4561  			PaymentHashString: req.PaymentHashString,
  4562  		},
  4563  		route: route,
  4564  	}, nil
  4565  }
  4566  
  4567  // rpcPaymentIntent is a small wrapper struct around the set of values we can
  4568  // receive from a client over RPC if they wish to send a payment. We'll either
  4569  // extract these fields from a payment request (which may include routing
  4570  // hints), or we'll get a fully populated route from the user that we'll pass
  4571  // directly to the channel router for dispatching.
  4572  type rpcPaymentIntent struct {
  4573  	mat                  lnwire.MilliAtom
  4574  	feeLimit             lnwire.MilliAtom
  4575  	cltvLimit            uint32
  4576  	dest                 route.Vertex
  4577  	rHash                [32]byte
  4578  	cltvDelta            uint16
  4579  	routeHints           [][]zpay32.HopHint
  4580  	outgoingChannelIDs   []uint64
  4581  	lastHop              *route.Vertex
  4582  	ignoreMaxOutboundAmt bool
  4583  	destFeatures         *lnwire.FeatureVector
  4584  	paymentAddr          *[32]byte
  4585  	payReq               []byte
  4586  
  4587  	destCustomRecords record.CustomSet
  4588  
  4589  	route *route.Route
  4590  }
  4591  
  4592  // extractPaymentIntent attempts to parse the complete details required to
  4593  // dispatch a payment from the information presented by an RPC client. There are
  4594  // three ways a client can specify their payment details: a payment request,
  4595  // via manual details, or via a complete route.
  4596  func (r *rpcServer) extractPaymentIntent(rpcPayReq *rpcPaymentRequest) (rpcPaymentIntent, error) {
  4597  	payIntent := rpcPaymentIntent{
  4598  		ignoreMaxOutboundAmt: rpcPayReq.IgnoreMaxOutboundAmt,
  4599  	}
  4600  
  4601  	// If a route was specified, then we can use that directly.
  4602  	if rpcPayReq.route != nil {
  4603  		// If the user is using the REST interface, then they'll be
  4604  		// passing the payment hash as a hex encoded string.
  4605  		if rpcPayReq.PaymentHashString != "" {
  4606  			paymentHash, err := hex.DecodeString(
  4607  				rpcPayReq.PaymentHashString,
  4608  			)
  4609  			if err != nil {
  4610  				return payIntent, err
  4611  			}
  4612  
  4613  			copy(payIntent.rHash[:], paymentHash)
  4614  		} else {
  4615  			copy(payIntent.rHash[:], rpcPayReq.PaymentHash)
  4616  		}
  4617  
  4618  		payIntent.route = rpcPayReq.route
  4619  		return payIntent, nil
  4620  	}
  4621  
  4622  	// If there are no routes specified, pass along an outgoing channel
  4623  	// restriction if specified. The main server rpc does not support
  4624  	// multiple channel restrictions.
  4625  	if rpcPayReq.OutgoingChanId != 0 {
  4626  		payIntent.outgoingChannelIDs = []uint64{
  4627  			rpcPayReq.OutgoingChanId,
  4628  		}
  4629  	}
  4630  
  4631  	// Pass along a last hop restriction if specified.
  4632  	if len(rpcPayReq.LastHopPubkey) > 0 {
  4633  		lastHop, err := route.NewVertexFromBytes(
  4634  			rpcPayReq.LastHopPubkey,
  4635  		)
  4636  		if err != nil {
  4637  			return payIntent, err
  4638  		}
  4639  		payIntent.lastHop = &lastHop
  4640  	}
  4641  
  4642  	// Take the CLTV limit from the request if set, otherwise use the max.
  4643  	cltvLimit, err := routerrpc.ValidateCLTVLimit(
  4644  		rpcPayReq.CltvLimit, r.cfg.MaxOutgoingCltvExpiry,
  4645  	)
  4646  	if err != nil {
  4647  		return payIntent, err
  4648  	}
  4649  	payIntent.cltvLimit = cltvLimit
  4650  
  4651  	customRecords := record.CustomSet(rpcPayReq.DestCustomRecords)
  4652  	if err := customRecords.Validate(); err != nil {
  4653  		return payIntent, err
  4654  	}
  4655  	payIntent.destCustomRecords = customRecords
  4656  
  4657  	validateDest := func(dest route.Vertex) error {
  4658  		if rpcPayReq.AllowSelfPayment {
  4659  			return nil
  4660  		}
  4661  
  4662  		if dest == r.selfNode {
  4663  			return errors.New("self-payments not allowed")
  4664  		}
  4665  
  4666  		return nil
  4667  	}
  4668  
  4669  	// If the payment request field isn't blank, then the details of the
  4670  	// invoice are encoded entirely within the encoded payReq.  So we'll
  4671  	// attempt to decode it, populating the payment accordingly.
  4672  	if rpcPayReq.PaymentRequest != "" {
  4673  		payReq, err := zpay32.Decode(
  4674  			rpcPayReq.PaymentRequest, r.cfg.ActiveNetParams.Params,
  4675  		)
  4676  		if err != nil {
  4677  			return payIntent, err
  4678  		}
  4679  
  4680  		// Copy the decoded payment hash so that callers can identify
  4681  		// the original payreq in case of errors.
  4682  		copy(payIntent.rHash[:], payReq.PaymentHash[:])
  4683  
  4684  		// Next, we'll ensure that this payreq hasn't already expired.
  4685  		err = routerrpc.ValidatePayReqExpiry(payReq)
  4686  		if err != nil {
  4687  			return payIntent, err
  4688  		}
  4689  
  4690  		// If the amount was not included in the invoice, then we let
  4691  		// the payer specify the amount of atoms they wish to send.
  4692  		// Otherwise, the amount to pay is taken from the payment
  4693  		// request itself.
  4694  		if payReq.MilliAt == nil {
  4695  			amt, err := lnrpc.UnmarshallAmt(
  4696  				rpcPayReq.Amt, rpcPayReq.AmtMAtoms,
  4697  			)
  4698  			if err != nil {
  4699  				return payIntent, err
  4700  			}
  4701  			if amt == 0 {
  4702  				return payIntent, errors.New("amount must be " +
  4703  					"specified when paying a zero amount " +
  4704  					"invoice")
  4705  			}
  4706  
  4707  			payIntent.mat = amt
  4708  		} else {
  4709  			payIntent.mat = *payReq.MilliAt
  4710  		}
  4711  
  4712  		// Calculate the fee limit that should be used for this payment.
  4713  		payIntent.feeLimit = lnrpc.CalculateFeeLimit(
  4714  			rpcPayReq.FeeLimit, payIntent.mat,
  4715  		)
  4716  
  4717  		copy(payIntent.rHash[:], payReq.PaymentHash[:])
  4718  		destKey := payReq.Destination.SerializeCompressed()
  4719  		copy(payIntent.dest[:], destKey)
  4720  		payIntent.cltvDelta = uint16(payReq.MinFinalCLTVExpiry())
  4721  		payIntent.routeHints = payReq.RouteHints
  4722  		payIntent.payReq = []byte(rpcPayReq.PaymentRequest)
  4723  		payIntent.destFeatures = payReq.Features
  4724  		payIntent.paymentAddr = payReq.PaymentAddr
  4725  
  4726  		if err := validateDest(payIntent.dest); err != nil {
  4727  			return payIntent, err
  4728  		}
  4729  
  4730  		// Do bounds checking with the block padding.
  4731  		err = routing.ValidateCLTVLimit(
  4732  			payIntent.cltvLimit, payIntent.cltvDelta, true,
  4733  		)
  4734  		if err != nil {
  4735  			return payIntent, err
  4736  		}
  4737  
  4738  		return payIntent, nil
  4739  	}
  4740  
  4741  	// At this point, a destination MUST be specified, so we'll convert it
  4742  	// into the proper representation now. The destination will either be
  4743  	// encoded as raw bytes, or via a hex string.
  4744  	var pubBytes []byte
  4745  	if len(rpcPayReq.Dest) != 0 {
  4746  		pubBytes = rpcPayReq.Dest
  4747  	} else {
  4748  		var err error
  4749  		pubBytes, err = hex.DecodeString(rpcPayReq.DestString)
  4750  		if err != nil {
  4751  			return payIntent, err
  4752  		}
  4753  	}
  4754  	if len(pubBytes) != 33 {
  4755  		return payIntent, errors.New("invalid key length")
  4756  	}
  4757  	copy(payIntent.dest[:], pubBytes)
  4758  
  4759  	if err := validateDest(payIntent.dest); err != nil {
  4760  		return payIntent, err
  4761  	}
  4762  
  4763  	// Payment address may not be needed by legacy invoices.
  4764  	if len(rpcPayReq.PaymentAddr) != 0 && len(rpcPayReq.PaymentAddr) != 32 {
  4765  		return payIntent, errors.New("invalid payment address length")
  4766  	}
  4767  
  4768  	// Set the payment address if it was explicitly defined with the
  4769  	// rpcPaymentRequest.
  4770  	// Note that the payment address for the payIntent should be nil if none
  4771  	// was provided with the rpcPaymentRequest.
  4772  	if len(rpcPayReq.PaymentAddr) != 0 {
  4773  		payIntent.paymentAddr = &[32]byte{}
  4774  		copy(payIntent.paymentAddr[:], rpcPayReq.PaymentAddr)
  4775  	}
  4776  
  4777  	// Otherwise, if the payment request field was not specified
  4778  	// (and a custom route wasn't specified), construct the payment
  4779  	// from the other fields.
  4780  	payIntent.mat, err = lnrpc.UnmarshallAmt(
  4781  		rpcPayReq.Amt, rpcPayReq.AmtMAtoms,
  4782  	)
  4783  	if err != nil {
  4784  		return payIntent, err
  4785  	}
  4786  
  4787  	// Calculate the fee limit that should be used for this payment.
  4788  	payIntent.feeLimit = lnrpc.CalculateFeeLimit(
  4789  		rpcPayReq.FeeLimit, payIntent.mat,
  4790  	)
  4791  
  4792  	if rpcPayReq.FinalCltvDelta != 0 {
  4793  		payIntent.cltvDelta = uint16(rpcPayReq.FinalCltvDelta)
  4794  	} else {
  4795  		// If no final cltv delta is given, assume the default that we
  4796  		// use when creating an invoice. We do not assume the default of
  4797  		// 9 blocks that is defined in BOLT-11, because this is never
  4798  		// enough for other lnd nodes.
  4799  		payIntent.cltvDelta = uint16(r.cfg.Decred.TimeLockDelta)
  4800  	}
  4801  
  4802  	// Do bounds checking with the block padding so the router isn't left
  4803  	// with a zombie payment in case the user messes up.
  4804  	err = routing.ValidateCLTVLimit(
  4805  		payIntent.cltvLimit, payIntent.cltvDelta, true,
  4806  	)
  4807  	if err != nil {
  4808  		return payIntent, err
  4809  	}
  4810  
  4811  	// If the user is manually specifying payment details, then the payment
  4812  	// hash may be encoded as a string.
  4813  	switch {
  4814  	case rpcPayReq.PaymentHashString != "":
  4815  		paymentHash, err := hex.DecodeString(
  4816  			rpcPayReq.PaymentHashString,
  4817  		)
  4818  		if err != nil {
  4819  			return payIntent, err
  4820  		}
  4821  
  4822  		copy(payIntent.rHash[:], paymentHash)
  4823  
  4824  	default:
  4825  		copy(payIntent.rHash[:], rpcPayReq.PaymentHash)
  4826  	}
  4827  
  4828  	// Unmarshal any custom destination features.
  4829  	payIntent.destFeatures, err = routerrpc.UnmarshalFeatures(
  4830  		rpcPayReq.DestFeatures,
  4831  	)
  4832  	if err != nil {
  4833  		return payIntent, err
  4834  	}
  4835  
  4836  	// Currently, within the bootstrap phase of the network, we limit the
  4837  	// largest payment size allotted to (2^32) - 1 milli-atoms or 4.29
  4838  	// million atoms.
  4839  	if payIntent.mat > MaxPaymentMAtoms {
  4840  		// In this case, we'll send an error to the caller, but
  4841  		// continue our loop for the next payment.
  4842  		return payIntent, fmt.Errorf("payment of %v is too large, "+
  4843  			"max payment allowed is %v", payIntent.mat,
  4844  			MaxPaymentMAtoms)
  4845  
  4846  	}
  4847  
  4848  	return payIntent, nil
  4849  }
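
// Two hedged sketches of how lnrpc.SendRequest fields map onto the branches
// above: paying an encoded payment request versus specifying the details
// manually (invoiceStr, destPubKeyBytes and rHash are assumed to be in scope):
//
//	// The invoice carries the destination, payment hash and (usually) the
//	// amount.
//	byPayReq := &lnrpc.SendRequest{
//		PaymentRequest: invoiceStr,
//	}
//
//	// Manual details: a 33-byte compressed destination key, amount and
//	// payment hash; FinalCltvDelta falls back to the node's TimeLockDelta
//	// when left at zero.
//	byDetails := &lnrpc.SendRequest{
//		Dest:           destPubKeyBytes,
//		Amt:            100000, // atoms
//		PaymentHash:    rHash[:],
//		FinalCltvDelta: 80,
//	}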
  4850  
  4851  type paymentIntentResponse struct {
  4852  	Route    *route.Route
  4853  	Preimage [32]byte
  4854  	Err      error
  4855  }
  4856  
  4857  // checkCanSendPayment verifies whether the minimum conditions for sending the
  4858  // given payment from this node are met, such as having an open channel with a
  4859  // live peer with enough outbound bandwidth for sending it.
  4860  func (r *rpcServer) checkCanSendPayment(payIntent *rpcPaymentIntent) error {
  4861  	// Return early if we've been instructed to ignore the available
  4862  	// outbound bandwidth.
  4863  	if payIntent.ignoreMaxOutboundAmt {
  4864  		return nil
  4865  	}
  4866  
  4867  	// Verify whether there is at least one channel with enough outbound
  4868  	// capacity (after accounting for channel reserves) to send this
  4869  	// payment.
  4870  	openChannels, err := r.server.chanStateDB.FetchAllOpenChannels()
  4871  	if err != nil {
  4872  		return err
  4873  	}
  4874  
  4875  	// If the node has no open channels, it can't possibly send this
  4876  	// payment.
  4877  	if len(openChannels) == 0 {
  4878  		return errors.New("no open channels")
  4879  	}
  4880  
  4881  	// Determine how much we're likely to pay as tx fee for adding a new
  4882  	// htlc. We use the minimum relay fee since this is just a quick
  4883  	// estimate on whether we'll be able to fulfill the payment.
  4884  	relayFee := r.server.cc.FeeEstimator.RelayFeePerKB()
  4885  	htlcFee := relayFee.FeeForSize(input.HTLCOutputSize)
  4886  
  4887  	// Convert the payment amount to atoms, since we can't have an open
  4888  	// channel with less than 1 atom and milliatom payments might not alter
  4889  	// the channel balances.
  4890  	amt := payIntent.mat.ToAtoms() + htlcFee
  4891  
  4892  	// Loop through all available channels, check for liveliness and
  4893  	// capacity.
  4894  	var maxOutboundAmt dcrutil.Amount
  4895  	for _, channel := range openChannels {
  4896  		// Ensure the channel is active and the remote peer is online,
  4897  		// which is required to send to this channel.
  4898  		chanPoint := &channel.FundingOutpoint
  4899  		if _, err := r.server.FindPeer(channel.IdentityPub); err != nil {
  4900  			// We're not connected to the peer, therefore can't
  4901  			// send htlcs to it.
  4902  			continue
  4903  		}
  4904  
  4905  		// Try to retrieve the link from the htlc switch to verify we
  4906  		// can currently use this channel for routing.
  4907  		channelID := lnwire.NewChanIDFromOutPoint(chanPoint)
  4908  		var link htlcswitch.ChannelUpdateHandler
  4909  		if link, err = r.server.htlcSwitch.GetLink(channelID); err != nil {
  4910  			continue
  4911  		}
  4912  
  4913  		// If this link isn't eligible for htlc forwarding, it means we
  4914  		// can't send to it.
  4915  		if !link.EligibleToForward() {
  4916  			continue
  4917  		}
  4918  
  4919  		// We have now verified the channel is online and can route
  4920  		// htlcs through it. Verify whether it has enough outbound
  4921  		// capacity for this payment.
  4922  		//
  4923  		// Outbound capacity for a channel is how much the local node
  4924  		// currently has minus what the remote node requires us to
  4925  		// maintain at all times (chan_reserve).
  4926  		capacity := channel.LocalCommitment.LocalBalance.ToAtoms() -
  4927  			channel.LocalChanCfg.ChannelConstraints.ChanReserve
  4928  		maxOutboundAmt += capacity
  4929  
  4930  		// Return early if we have enough outbound capacity.
  4931  		if maxOutboundAmt >= amt {
  4932  			return nil
  4933  		}
  4934  	}
  4935  
  4936  	if maxOutboundAmt == 0 {
  4937  		return errors.New("no online channels found")
  4938  	}
  4939  
  4940  	missingCap := amt - maxOutboundAmt
  4941  	return fmt.Errorf("not enough outbound capacity (missing %d atoms)",
  4942  		missingCap)
  4943  }
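
// A worked example of the capacity check above, with illustrative numbers
// only: if the relay fee yields an htlcFee of 1,000 atoms, a 50,000 atom
// payment needs amt = 51,000 atoms of outbound capacity. A live channel with
// a local balance of 60,000 atoms and a 6,000 atom chan_reserve contributes
// 60,000 - 6,000 = 54,000 atoms, so the check passes on that channel alone.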
  4944  
  4945  // dispatchPaymentIntent attempts to fully dispatch an RPC payment intent.
  4946  // We'll either pass the payment as a whole to the channel router, or give it a
  4947  // pre-built route. The error returned by this method denotes whether we were
  4948  // unable to save the payment, while the Err field of the returned response
  4949  // denotes whether the payment itself didn't succeed.
  4950  func (r *rpcServer) dispatchPaymentIntent(
  4951  	payIntent *rpcPaymentIntent) (*paymentIntentResponse, error) {
  4952  
  4953  	// Perform a pre-flight check for sending this payment.
  4954  	if err := r.checkCanSendPayment(payIntent); err != nil {
  4955  		return &paymentIntentResponse{
  4956  			Err: err,
  4957  		}, nil
  4958  	}
  4959  
  4960  	// Construct a payment request to send to the channel router. If the
  4961  	// payment is successful, the route chosen will be returned. Otherwise,
  4962  	// we'll get a non-nil error.
  4963  	var (
  4964  		preImage  [32]byte
  4965  		route     *route.Route
  4966  		routerErr error
  4967  	)
  4968  
  4969  	// If a route was specified, then we'll pass the route directly to the
  4970  	// router, otherwise we'll create a payment session to execute it.
  4971  	if payIntent.route == nil {
  4972  		payment := &routing.LightningPayment{
  4973  			Target:             payIntent.dest,
  4974  			Amount:             payIntent.mat,
  4975  			FinalCLTVDelta:     payIntent.cltvDelta,
  4976  			FeeLimit:           payIntent.feeLimit,
  4977  			CltvLimit:          payIntent.cltvLimit,
  4978  			RouteHints:         payIntent.routeHints,
  4979  			OutgoingChannelIDs: payIntent.outgoingChannelIDs,
  4980  			LastHop:            payIntent.lastHop,
  4981  			PaymentRequest:     payIntent.payReq,
  4982  			PayAttemptTimeout:  routing.DefaultPayAttemptTimeout,
  4983  			DestCustomRecords:  payIntent.destCustomRecords,
  4984  			DestFeatures:       payIntent.destFeatures,
  4985  			PaymentAddr:        payIntent.paymentAddr,
  4986  
  4987  			// Don't enable multi-part payments on the main rpc.
  4988  			// Users need to use routerrpc for that.
  4989  			MaxParts: 1,
  4990  		}
  4991  		err := payment.SetPaymentHash(payIntent.rHash)
  4992  		if err != nil {
  4993  			return nil, err
  4994  		}
  4995  
  4996  		preImage, route, routerErr = r.server.chanRouter.SendPayment(
  4997  			payment,
  4998  		)
  4999  	} else {
  5000  		var attempt *channeldb.HTLCAttempt
  5001  		attempt, routerErr = r.server.chanRouter.SendToRoute(
  5002  			payIntent.rHash, payIntent.route,
  5003  		)
  5004  
  5005  		if routerErr == nil {
  5006  			preImage = attempt.Settle.Preimage
  5007  		}
  5008  
  5009  		route = payIntent.route
  5010  	}
  5011  
  5012  	// If the route failed, then we'll return a nil save err, but a non-nil
  5013  	// routing err.
  5014  	if routerErr != nil {
  5015  		rpcsLog.Warnf("Unable to send payment: %v", routerErr)
  5016  
  5017  		return &paymentIntentResponse{
  5018  			Err: routerErr,
  5019  		}, nil
  5020  	}
  5021  
  5022  	return &paymentIntentResponse{
  5023  		Route:    route,
  5024  		Preimage: preImage,
  5025  	}, nil
  5026  }
  5027  
  5028  // sendPayment takes a paymentStream (a source of pre-built routes or payment
  5029  // requests) and continually attempts to dispatch payment requests written to
  5030  // the write end of the stream. Responses will also be streamed back to the
  5031  // client via the write end of the stream. This method is used by both SendToRoute
  5032  // and SendPayment as the logic is virtually identical.
  5033  func (r *rpcServer) sendPayment(stream *paymentStream) error {
  5034  	payChan := make(chan *rpcPaymentIntent)
  5035  	errChan := make(chan error, 1)
  5036  
  5037  	// We don't allow payments to be sent while the daemon itself is still
  5038  	// syncing as we may be trying to send a payment over a "stale"
  5039  	// channel.
  5040  	if !r.server.Started() {
  5041  		return ErrServerNotActive
  5042  	}
  5043  
  5044  	// TODO(roasbeef): check payment filter to see if already used?
  5045  
  5046  	// In order to limit the level of concurrency and prevent a client from
  5047  	// attempting to OOM the server, we'll set up a semaphore to create an
  5048  	// upper ceiling on the number of outstanding payments.
  5049  	const numOutstandingPayments = 2000
  5050  	htlcSema := make(chan struct{}, numOutstandingPayments)
  5051  	for i := 0; i < numOutstandingPayments; i++ {
  5052  		htlcSema <- struct{}{}
  5053  	}
  5054  
  5055  	// We keep track of the running goroutines and set up a quit signal we
  5056  	// can use to request them to exit if the method returns because of an
  5057  	// encountered error.
  5058  	var wg sync.WaitGroup
  5059  	reqQuit := make(chan struct{})
  5060  	defer close(reqQuit)
  5061  
  5062  	// Launch a new goroutine to handle reading new payment requests from
  5063  	// the client. This way we can handle errors independently of blocking
  5064  	// and waiting for the next payment request to come through.
  5065  	// TODO(joostjager): Callers expect results to come in in the same order
  5066  	// as the requests were sent, but this is far from guaranteed in the
  5067  	// code below.
  5068  	wg.Add(1)
  5069  	go func() {
  5070  		defer wg.Done()
  5071  
  5072  		for {
  5073  			select {
  5074  			case <-reqQuit:
  5075  				return
  5076  
  5077  			default:
  5078  				// Receive the next pending payment within the
  5079  				// stream sent by the client. If we read the
  5080  				// EOF sentinel, then the client has closed the
  5081  				// stream, and we can exit normally.
  5082  				nextPayment, err := stream.recv()
  5083  				if err == io.EOF {
  5084  					close(payChan)
  5085  					return
  5086  				} else if err != nil {
  5087  					rpcsLog.Errorf("Failed receiving from "+
  5088  						"stream: %v", err)
  5089  
  5090  					select {
  5091  					case errChan <- err:
  5092  					default:
  5093  					}
  5094  					return
  5095  				}
  5096  
  5097  				// Populate the next payment, either from the
  5098  				// payment request, or from the explicitly set
  5099  				// fields. If the payment proto wasn't well
  5100  				// formed, then we'll send an error reply and
  5101  				// wait for the next payment.
  5102  				payIntent, err := r.extractPaymentIntent(
  5103  					nextPayment,
  5104  				)
  5105  				if err != nil {
  5106  					if err := stream.send(&lnrpc.SendResponse{
  5107  						PaymentError: err.Error(),
  5108  						PaymentHash:  payIntent.rHash[:],
  5109  					}); err != nil {
  5110  						rpcsLog.Errorf("Failed "+
  5111  							"sending on "+
  5112  							"stream: %v", err)
  5113  
  5114  						select {
  5115  						case errChan <- err:
  5116  						default:
  5117  						}
  5118  						return
  5119  					}
  5120  					continue
  5121  				}
  5122  
  5123  				// If the payment was well formed, then we'll
  5124  				// send it to the dispatch goroutine, or exit,
  5125  				// whichever comes first.
  5126  				select {
  5127  				case payChan <- &payIntent:
  5128  				case <-reqQuit:
  5129  					return
  5130  				}
  5131  			}
  5132  		}
  5133  	}()
  5134  
  5135  sendLoop:
  5136  	for {
  5137  		select {
  5138  
  5139  		// If we encounter an error either during sending or
  5140  		// receiving, we return directly, closing the stream.
  5141  		case err := <-errChan:
  5142  			return err
  5143  
  5144  		case <-r.quit:
  5145  			return errors.New("rpc server shutting down")
  5146  
  5147  		case payIntent, ok := <-payChan:
  5148  			// If the receive loop is done, we break the send loop
  5149  			// and wait for the ongoing payments to finish before
  5150  			// exiting.
  5151  			if !ok {
  5152  				break sendLoop
  5153  			}
  5154  
  5155  			// We launch a new goroutine to execute the current
  5156  			// payment so we can continue to serve requests while
  5157  			// this payment is being dispatched.
  5158  			wg.Add(1)
  5159  			go func() {
  5160  				defer wg.Done()
  5161  
  5162  				// Attempt to grab a free semaphore slot, using
  5163  				// a defer to eventually release the slot
  5164  				// regardless of payment success.
  5165  				select {
  5166  				case <-htlcSema:
  5167  				case <-reqQuit:
  5168  					return
  5169  				}
  5170  				defer func() {
  5171  					htlcSema <- struct{}{}
  5172  				}()
  5173  
  5174  				resp, saveErr := r.dispatchPaymentIntent(
  5175  					payIntent,
  5176  				)
  5177  
  5178  				switch {
  5179  				// If we were unable to save the state of the
  5180  				// payment, then we'll return the error to the
  5181  				// user, and terminate.
  5182  				case saveErr != nil:
  5183  					rpcsLog.Errorf("Failed dispatching "+
  5184  						"payment intent: %v", saveErr)
  5185  
  5186  					select {
  5187  					case errChan <- saveErr:
  5188  					default:
  5189  					}
  5190  					return
  5191  
  5192  				// If we receive a payment error then, instead
  5193  				// of terminating the stream, we send an error
  5194  				// response to the user.
  5195  				case resp.Err != nil:
  5196  					err := stream.send(&lnrpc.SendResponse{
  5197  						PaymentError: resp.Err.Error(),
  5198  						PaymentHash:  payIntent.rHash[:],
  5199  					})
  5200  					if err != nil {
  5201  						rpcsLog.Errorf("Failed "+
  5202  							"sending error "+
  5203  							"response: %v", err)
  5204  
  5205  						select {
  5206  						case errChan <- err:
  5207  						default:
  5208  						}
  5209  					}
  5210  					return
  5211  				}
  5212  
  5213  				backend := r.routerBackend
  5214  				marshalledRouted, err := backend.MarshallRoute(
  5215  					resp.Route,
  5216  				)
  5217  				if err != nil {
  5218  					errChan <- err
  5219  					return
  5220  				}
  5221  
  5222  				err = stream.send(&lnrpc.SendResponse{
  5223  					PaymentHash:     payIntent.rHash[:],
  5224  					PaymentPreimage: resp.Preimage[:],
  5225  					PaymentRoute:    marshalledRouted,
  5226  				})
  5227  				if err != nil {
  5228  					rpcsLog.Errorf("Failed sending "+
  5229  						"response: %v", err)
  5230  
  5231  					select {
  5232  					case errChan <- err:
  5233  					default:
  5234  					}
  5235  					return
  5236  				}
  5237  			}()
  5238  		}
  5239  	}
  5240  
  5241  	// Wait for all goroutines to finish before closing the stream.
  5242  	wg.Wait()
  5243  	return nil
  5244  }
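
// The htlcSema above is the usual buffered-channel counting semaphore; a
// standalone sketch of the same technique (hypothetical names):
//
//	sema := make(chan struct{}, maxConcurrent)
//	for i := 0; i < maxConcurrent; i++ {
//		sema <- struct{}{}
//	}
//
//	// Each worker takes a token before doing work and returns it when done.
//	<-sema
//	defer func() { sema <- struct{}{} }()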
  5245  
  5246  // SendPaymentSync is the synchronous non-streaming version of SendPayment.
  5247  // This RPC is intended to be consumed by clients of the REST proxy.
  5248  // Additionally, this RPC expects the destination's public key and the payment
  5249  // hash (if any) to be encoded as hex strings.
  5250  func (r *rpcServer) SendPaymentSync(ctx context.Context,
  5251  	nextPayment *lnrpc.SendRequest) (*lnrpc.SendResponse, error) {
  5252  
  5253  	return r.sendPaymentSync(ctx, &rpcPaymentRequest{
  5254  		SendRequest: nextPayment,
  5255  	})
  5256  }
  5257  
  5258  // SendToRouteSync is the synchronous non-streaming version of SendToRoute.
  5259  // This RPC is intended to be consumed by clients of the REST proxy.
  5260  // Additionally, this RPC expects the payment hash (if any) to be encoded as
  5261  // hex strings.
  5262  func (r *rpcServer) SendToRouteSync(ctx context.Context,
  5263  	req *lnrpc.SendToRouteRequest) (*lnrpc.SendResponse, error) {
  5264  
  5265  	if req.Route == nil {
  5266  		return nil, fmt.Errorf("unable to send, no routes provided")
  5267  	}
  5268  
  5269  	paymentRequest, err := r.unmarshallSendToRouteRequest(req)
  5270  	if err != nil {
  5271  		return nil, err
  5272  	}
  5273  
  5274  	return r.sendPaymentSync(ctx, paymentRequest)
  5275  }
  5276  
  5277  // sendPaymentSync is the synchronous variant of sendPayment. It will block and
  5278  // wait until the payment has been fully completed.
  5279  func (r *rpcServer) sendPaymentSync(ctx context.Context,
  5280  	nextPayment *rpcPaymentRequest) (*lnrpc.SendResponse, error) {
  5281  
  5282  	// We don't allow payments to be sent while the daemon itself is still
  5283  	// syncing as we may be trying to send a payment over a "stale"
  5284  	// channel.
  5285  	if !r.server.Started() {
  5286  		return nil, ErrServerNotActive
  5287  	}
  5288  
  5289  	// First we'll attempt to map the proto describing the next payment to
  5290  	// an intent that we can pass to local sub-systems.
  5291  	payIntent, err := r.extractPaymentIntent(nextPayment)
  5292  	if err != nil {
  5293  		return nil, err
  5294  	}
  5295  
  5296  	// With the payment validated, we'll now attempt to dispatch the
  5297  	// payment.
  5298  	resp, saveErr := r.dispatchPaymentIntent(&payIntent)
  5299  	switch {
  5300  	case saveErr != nil:
  5301  		return nil, saveErr
  5302  
  5303  	case resp.Err != nil:
  5304  		return &lnrpc.SendResponse{
  5305  			PaymentError: resp.Err.Error(),
  5306  			PaymentHash:  payIntent.rHash[:],
  5307  		}, nil
  5308  	}
  5309  
  5310  	rpcRoute, err := r.routerBackend.MarshallRoute(resp.Route)
  5311  	if err != nil {
  5312  		return nil, err
  5313  	}
  5314  
  5315  	return &lnrpc.SendResponse{
  5316  		PaymentHash:     payIntent.rHash[:],
  5317  		PaymentPreimage: resp.Preimage[:],
  5318  		PaymentRoute:    rpcRoute,
  5319  	}, nil
  5320  }
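
// A minimal client-side sketch of the synchronous call (assuming an
// established grpc.ClientConn named conn and an invoiceStr to pay):
//
//	client := lnrpc.NewLightningClient(conn)
//	resp, err := client.SendPaymentSync(ctx, &lnrpc.SendRequest{
//		PaymentRequest: invoiceStr,
//	})
//	if err != nil {
//		return err
//	}
//	if resp.PaymentError != "" {
//		return errors.New(resp.PaymentError)
//	}
//	fmt.Printf("paid, preimage: %x\n", resp.PaymentPreimage)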
  5321  
  5322  // checkCanReceiveInvoice performs a check on available inbound capacity from
  5323  // directly connected channels to ensure the passed invoice can be settled.
  5324  //
  5325  // It returns nil if there is enough capacity to potentially settle the invoice
  5326  // or an error otherwise.
  5327  func (r *rpcServer) checkCanReceiveInvoice(ctx context.Context,
  5328  	invoice *lnrpc.Invoice) error {
  5329  
  5330  	// Return early if we've been instructed to ignore the available inbound
  5331  	// bandwidth.
  5332  	if invoice.IgnoreMaxInboundAmt {
  5333  		return nil
  5334  	}
  5335  
  5336  	// Verify whether there is at least one channel with enough inbound
  5337  	// capacity (after accounting for channel reserves) to receive the payment
  5338  	// from this invoice.
  5339  	openChannels, err := r.server.chanStateDB.FetchAllOpenChannels()
  5340  	if err != nil {
  5341  		return err
  5342  	}
  5343  
  5344  	// If the node has no open channels, it can't possibly receive payment for
  5345  	// this.
  5346  	if len(openChannels) == 0 {
  5347  		return errors.New("no open channels")
  5348  	}
  5349  
  5350  	amt := dcrutil.Amount(invoice.Value)
  5351  	if invoice.ValueMAtoms != 0 {
  5352  		amt = dcrutil.Amount(invoice.ValueMAtoms / 1000)
  5353  		if invoice.ValueMAtoms%1000 > 0 {
  5354  			amt += 1
  5355  		}
  5356  	}
  5357  
  5358  	// Loop through all available channels, check for liveliness and capacity.
  5359  	var maxInbound dcrutil.Amount
  5360  	var debugErrs []string
  5361  	for _, channel := range openChannels {
  5362  		// Ensure the channel is active and the remote peer is online, which is
  5363  		// required to receive from this channel.
  5364  		chanPoint := &channel.FundingOutpoint
  5365  		if _, err := r.server.FindPeer(channel.IdentityPub); err != nil {
  5366  			// We're not connected to the peer, therefore can't receive htlcs
  5367  			// from it.
  5368  			debugErrs = append(debugErrs,
  5369  				fmt.Sprintf("unable to find peer for chanpoint %s: %v", chanPoint, err))
  5370  			continue
  5371  		}
  5372  
  5373  		// Try to retrieve the link from the htlc switch to verify we can
  5374  		// currently use this channel for routing.
  5375  		channelID := lnwire.NewChanIDFromOutPoint(chanPoint)
  5376  		var link htlcswitch.ChannelUpdateHandler
  5377  		if link, err = r.server.htlcSwitch.GetLink(channelID); err != nil {
  5378  			debugErrs = append(debugErrs,
  5379  				fmt.Sprintf("failed to get link for chanpoint %s: %v", chanPoint, err))
  5380  			continue
  5381  		}
  5382  
  5383  		// If this link isn't eligible for htlc forwarding, it means we can't
  5384  		// receive from it.
  5385  		if !link.EligibleToForward() {
  5386  			debugErrs = append(debugErrs,
  5387  				fmt.Sprintf("link is not eligible to forward chanpoint %s -- skipping", chanPoint))
  5388  			continue
  5389  		}
  5390  
  5391  		// We have now verified the channel is online and can route htlcs
  5392  		// through it. Verify whether it has enough inbound capacity for this
  5393  		// new invoice.
  5394  		//
  5395  		// Inbound capacity for a channel is how much the remote node currently
  5396  		// has (the remote_balance from our pov) minus what we require the
  5397  		// remote node to maintain at all times (chan_reserve).
  5398  		capacity := channel.RemoteCommitment.RemoteBalance.ToAtoms() -
  5399  			channel.RemoteChanCfg.ChannelConstraints.ChanReserve
  5400  		maxInbound += capacity
  5401  
  5402  		// Stop early if we have enough inbound capacity already.
  5403  		if maxInbound >= amt {
  5404  			return nil
  5405  		}
  5406  	}
  5407  
  5408  	if maxInbound == 0 {
  5409  		return errors.New("no online channels found")
  5410  	}
  5411  
  5412  	for _, debugErr := range debugErrs {
  5413  		rpcsLog.Debugf("addinvoice(amt %d): %v", amt, debugErr)
  5414  	}
  5415  
  5416  	missingCap := amt - maxInbound
  5417  	return fmt.Errorf("not enough inbound capacity (missing %d atoms)",
  5418  		missingCap)
  5419  }
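
// Note that the milliatom-to-atom conversion above rounds up: an invoice with
// ValueMAtoms = 1500, for example, yields amt = 1500/1000 = 1 plus 1 for the
// 500 milliatom remainder, i.e. 2 atoms of required inbound capacity.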
  5420  
  5421  // AddInvoice attempts to add a new invoice to the invoice database. Any
  5422  // duplicated invoices are rejected, therefore all invoices *must* have a
  5423  // unique payment preimage.
  5424  func (r *rpcServer) AddInvoice(ctx context.Context,
  5425  	invoice *lnrpc.Invoice) (*lnrpc.AddInvoiceResponse, error) {
  5426  
  5427  	if err := r.checkCanReceiveInvoice(ctx, invoice); err != nil {
  5428  		return nil, err
  5429  	}
  5430  
  5431  	defaultDelta := r.cfg.Decred.TimeLockDelta
  5432  
  5433  	addInvoiceCfg := &invoicesrpc.AddInvoiceConfig{
  5434  		AddInvoice:        r.server.invoices.AddInvoice,
  5435  		IsChannelActive:   r.server.htlcSwitch.HasActiveLink,
  5436  		ChainParams:       r.cfg.ActiveNetParams.Params,
  5437  		NodeSigner:        r.server.nodeSigner,
  5438  		DefaultCLTVExpiry: defaultDelta,
  5439  		ChanDB:            r.server.chanStateDB,
  5440  		Graph:             r.server.graphDB,
  5441  		GenInvoiceFeatures: func() *lnwire.FeatureVector {
  5442  			return r.server.featureMgr.Get(feature.SetInvoice)
  5443  		},
  5444  		GenAmpInvoiceFeatures: func() *lnwire.FeatureVector {
  5445  			return r.server.featureMgr.Get(feature.SetInvoiceAmp)
  5446  		},
  5447  	}
  5448  
  5449  	value, err := lnrpc.UnmarshallAmt(invoice.Value, invoice.ValueMAtoms)
  5450  	if err != nil {
  5451  		return nil, err
  5452  	}
  5453  
  5454  	// Convert the passed routing hints to the required format.
  5455  	routeHints, err := invoicesrpc.CreateZpay32HopHints(invoice.RouteHints)
  5456  	if err != nil {
  5457  		return nil, err
  5458  	}
  5459  	addInvoiceData := &invoicesrpc.AddInvoiceData{
  5460  		Memo:            invoice.Memo,
  5461  		Value:           value,
  5462  		DescriptionHash: invoice.DescriptionHash,
  5463  		Expiry:          invoice.Expiry,
  5464  		FallbackAddr:    invoice.FallbackAddr,
  5465  		CltvExpiry:      invoice.CltvExpiry,
  5466  		Private:         invoice.Private,
  5467  		RouteHints:      routeHints,
  5468  		Amp:             invoice.IsAmp,
  5469  	}
  5470  
  5471  	if invoice.RPreimage != nil {
  5472  		preimage, err := lntypes.MakePreimage(invoice.RPreimage)
  5473  		if err != nil {
  5474  			return nil, err
  5475  		}
  5476  		addInvoiceData.Preimage = &preimage
  5477  	}
  5478  
  5479  	hash, dbInvoice, err := invoicesrpc.AddInvoice(
  5480  		ctx, addInvoiceCfg, addInvoiceData,
  5481  	)
  5482  	if err != nil {
  5483  		return nil, err
  5484  	}
  5485  
  5486  	return &lnrpc.AddInvoiceResponse{
  5487  		AddIndex:       dbInvoice.AddIndex,
  5488  		PaymentRequest: string(dbInvoice.PaymentRequest),
  5489  		RHash:          hash[:],
  5490  		PaymentAddr:    dbInvoice.Terms.PaymentAddr[:],
  5491  	}, nil
  5492  }
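
// A minimal client-side sketch of adding an invoice (assuming an established
// grpc.ClientConn named conn):
//
//	client := lnrpc.NewLightningClient(conn)
//	resp, err := client.AddInvoice(ctx, &lnrpc.Invoice{
//		Memo:   "coffee",
//		Value:  10000, // atoms
//		Expiry: 3600,  // seconds
//	})
//	if err != nil {
//		return err
//	}
//	fmt.Println("payment request:", resp.PaymentRequest)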
  5493  
  5494  // LookupInvoice attempts to look up an invoice according to its payment hash.
  5495  // The passed payment hash *must* be exactly 32 bytes, if not an error is
  5496  // returned.
  5497  func (r *rpcServer) LookupInvoice(ctx context.Context,
  5498  	req *lnrpc.PaymentHash) (*lnrpc.Invoice, error) {
  5499  
  5500  	var (
  5501  		payHash [32]byte
  5502  		rHash   []byte
  5503  		err     error
  5504  	)
  5505  
  5506  	// If the RHash was provided as a hex string, then decode it and use
  5507  	// that directly. Otherwise, we use the raw bytes provided.
  5508  	if req.RHashStr != "" {
  5509  		rHash, err = hex.DecodeString(req.RHashStr)
  5510  		if err != nil {
  5511  			return nil, err
  5512  		}
  5513  	} else {
  5514  		rHash = req.RHash
  5515  	}
  5516  
  5517  	// Ensure that the payment hash is *exactly* 32-bytes.
  5518  	if len(rHash) != 0 && len(rHash) != 32 {
  5519  		return nil, fmt.Errorf("payment hash must be exactly "+
  5520  			"32 bytes, is instead %v", len(rHash))
  5521  	}
  5522  	copy(payHash[:], rHash)
  5523  
  5524  	rpcsLog.Tracef("[lookupinvoice] searching for invoice %x", payHash[:])
  5525  
  5526  	invoice, err := r.server.invoices.LookupInvoice(payHash)
  5527  	switch {
  5528  	case err == channeldb.ErrInvoiceNotFound:
  5529  		return nil, status.Error(codes.NotFound, err.Error())
  5530  	case err != nil:
  5531  		return nil, err
  5532  	}
  5533  
  5534  	rpcsLog.Tracef("[lookupinvoice] located invoice %v",
  5535  		newLogClosure(func() string {
  5536  			return spew.Sdump(invoice)
  5537  		}))
  5538  
  5539  	rpcInvoice, err := invoicesrpc.CreateRPCInvoice(
  5540  		&invoice, r.cfg.ActiveNetParams.Params,
  5541  	)
  5542  	if err != nil {
  5543  		return nil, err
  5544  	}
  5545  
  5546  	return rpcInvoice, nil
  5547  }
  5548  
  5549  // ListInvoices returns a list of all the invoices currently stored within the
  5550  // database. Any active debug invoices are ignored.
  5551  func (r *rpcServer) ListInvoices(ctx context.Context,
  5552  	req *lnrpc.ListInvoiceRequest) (*lnrpc.ListInvoiceResponse, error) {
  5553  
  5554  	// If the number of invoices was not specified, then we'll default to
  5555  	// returning the latest 100 invoices.
  5556  	if req.NumMaxInvoices == 0 {
  5557  		req.NumMaxInvoices = 100
  5558  	}
  5559  
  5560  	// Next, we'll map the proto request into a format that is understood by
  5561  	// the database.
  5562  	q := channeldb.InvoiceQuery{
  5563  		IndexOffset:    req.IndexOffset,
  5564  		NumMaxInvoices: req.NumMaxInvoices,
  5565  		PendingOnly:    req.PendingOnly,
  5566  		Reversed:       req.Reversed,
  5567  	}
  5568  	invoiceSlice, err := r.server.miscDB.QueryInvoices(q)
  5569  	if err != nil {
  5570  		return nil, fmt.Errorf("unable to query invoices: %v", err)
  5571  	}
  5572  
  5573  	// Before returning the response, we'll need to convert each invoice
  5574  	// into its proto representation.
  5575  	resp := &lnrpc.ListInvoiceResponse{
  5576  		Invoices:         make([]*lnrpc.Invoice, len(invoiceSlice.Invoices)),
  5577  		FirstIndexOffset: invoiceSlice.FirstIndexOffset,
  5578  		LastIndexOffset:  invoiceSlice.LastIndexOffset,
  5579  	}
  5580  	for i, invoice := range invoiceSlice.Invoices {
  5581  		invoice := invoice
  5582  		resp.Invoices[i], err = invoicesrpc.CreateRPCInvoice(
  5583  			&invoice, r.cfg.ActiveNetParams.Params,
  5584  		)
  5585  		if err != nil {
  5586  			// Instead of failing and returning an error, encode
  5587  			// the error message into the payment request field
  5588  			// (along with the original payment request stored in
  5589  			// the source db invoice) so that we can keep listing
  5590  			// the rest of the invoices even if a single invoice
  5591  			// was encoded in an otherwise invalid state.
  5592  			resp.Invoices[i] = &lnrpc.Invoice{
  5593  				PaymentRequest: fmt.Sprintf("[ERROR] %s (%s)",
  5594  					err.Error(), invoice.PaymentRequest),
  5595  			}
  5596  		}
  5597  	}
  5598  
  5599  	return resp, nil
  5600  }
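
// A hedged sketch of paging through all invoices using the offsets returned
// above (client is assumed to be a generated lnrpc.LightningClient):
//
//	var offset uint64
//	for {
//		resp, err := client.ListInvoices(ctx, &lnrpc.ListInvoiceRequest{
//			IndexOffset:    offset,
//			NumMaxInvoices: 100,
//		})
//		if err != nil {
//			return err
//		}
//		if len(resp.Invoices) == 0 {
//			break
//		}
//		offset = resp.LastIndexOffset
//	}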
  5601  
  5602  // SubscribeInvoices returns a uni-directional stream (server -> client) for
  5603  // notifying the client of newly added/settled invoices.
  5604  func (r *rpcServer) SubscribeInvoices(req *lnrpc.InvoiceSubscription,
  5605  	updateStream lnrpc.Lightning_SubscribeInvoicesServer) error {
  5606  
  5607  	invoiceClient, err := r.server.invoices.SubscribeNotifications(
  5608  		req.AddIndex, req.SettleIndex,
  5609  	)
  5610  	if err != nil {
  5611  		return err
  5612  	}
  5613  	defer invoiceClient.Cancel()
  5614  
  5615  	for {
  5616  		select {
  5617  		case newInvoice := <-invoiceClient.NewInvoices:
  5618  			rpcInvoice, err := invoicesrpc.CreateRPCInvoice(
  5619  				newInvoice, r.cfg.ActiveNetParams.Params,
  5620  			)
  5621  			if err != nil {
  5622  				return err
  5623  			}
  5624  
  5625  			if err := updateStream.Send(rpcInvoice); err != nil {
  5626  				return err
  5627  			}
  5628  
  5629  		case settledInvoice := <-invoiceClient.SettledInvoices:
  5630  			rpcInvoice, err := invoicesrpc.CreateRPCInvoice(
  5631  				settledInvoice, r.cfg.ActiveNetParams.Params,
  5632  			)
  5633  			if err != nil {
  5634  				return err
  5635  			}
  5636  
  5637  			if err := updateStream.Send(rpcInvoice); err != nil {
  5638  				return err
  5639  			}
  5640  
  5641  		// The response stream's context for whatever reason has been
  5642  		// closed. If context is closed by an exceeded deadline we will
  5643  		// return an error.
  5644  		case <-updateStream.Context().Done():
  5645  			if errors.Is(updateStream.Context().Err(), context.Canceled) {
  5646  				return nil
  5647  			}
  5648  			return updateStream.Context().Err()
  5649  
  5650  		case <-r.quit:
  5651  			return nil
  5652  		}
  5653  	}
  5654  }
  5655  
  5656  // SubscribeTransactions creates a uni-directional stream (server -> client) in
  5657  // which any newly discovered transactions relevant to the wallet are sent
  5658  // over.
  5659  func (r *rpcServer) SubscribeTransactions(req *lnrpc.GetTransactionsRequest,
  5660  	updateStream lnrpc.Lightning_SubscribeTransactionsServer) error {
  5661  
  5662  	txClient, err := r.server.cc.Wallet.SubscribeTransactions()
  5663  	if err != nil {
  5664  		return err
  5665  	}
  5666  	defer txClient.Cancel()
  5667  	rpcsLog.Infof("New transaction subscription")
  5668  
  5669  	for {
  5670  		select {
  5671  		case tx := <-txClient.ConfirmedTransactions():
  5672  			detail := lnrpc.RPCTransaction(tx)
  5673  			if err := updateStream.Send(detail); err != nil {
  5674  				return err
  5675  			}
  5676  
  5677  		case tx := <-txClient.UnconfirmedTransactions():
  5678  			detail := lnrpc.RPCTransaction(tx)
  5679  			if err := updateStream.Send(detail); err != nil {
  5680  				return err
  5681  			}
  5682  
  5683  		// The response stream's context for whatever reason has been
  5684  		// closed. If context is closed by an exceeded deadline we will
  5685  		// return an error.
  5686  		case <-updateStream.Context().Done():
  5687  			rpcsLog.Infof("Canceling transaction subscription")
  5688  			if errors.Is(updateStream.Context().Err(), context.Canceled) {
  5689  				return nil
  5690  			}
  5691  			return updateStream.Context().Err()
  5692  
  5693  		case <-r.quit:
  5694  			return nil
  5695  		}
  5696  	}
  5697  }
  5698  
  5699  // GetTransactions returns a list describing all the known transactions
  5700  // relevant to the wallet.
  5701  func (r *rpcServer) GetTransactions(ctx context.Context,
  5702  	req *lnrpc.GetTransactionsRequest) (*lnrpc.TransactionDetails, error) {
  5703  
  5704  	// To remain backwards compatible with the old api, default to the
  5705  	// special case end height which will return transactions from the start
  5706  	// height until the chain tip, including unconfirmed transactions.
  5707  	var endHeight = dcrwallet.UnconfirmedHeight
  5708  
  5709  	// If the user has provided an end height, we overwrite our default.
  5710  	if req.EndHeight != 0 {
  5711  		endHeight = req.EndHeight
  5712  	}
  5713  
  5714  	transactions, err := r.server.cc.Wallet.ListTransactionDetails(
  5715  		req.StartHeight, endHeight, req.Account,
  5716  	)
  5717  	if err != nil {
  5718  		return nil, err
  5719  	}
  5720  
  5721  	return lnrpc.RPCTransactionDetails(transactions), nil
  5722  }
  5723  
  5724  // DescribeGraph returns a description of the latest graph state from the PoV
  5725  // of the node. The graph information is partitioned into two components: all
  5726  // the nodes/vertexes, and all the edges that connect the vertexes themselves.
  5727  // As this is a directed graph, the edges also contain the node directional
  5728  // specific routing policy which includes: the time lock delta, fee
  5729  // information, etc.
  5730  func (r *rpcServer) DescribeGraph(ctx context.Context,
  5731  	req *lnrpc.ChannelGraphRequest) (*lnrpc.ChannelGraph, error) {
  5732  
  5733  	resp := &lnrpc.ChannelGraph{}
  5734  	includeUnannounced := req.IncludeUnannounced
  5735  
  5736  	// Check to see if the cache is already populated, if so then we can
  5737  	// just return it directly.
  5738  	//
  5739  	// TODO(roasbeef): move this to an interceptor level feature?
  5740  	graphCacheActive := r.cfg.Caches.RPCGraphCacheDuration != 0
  5741  	if graphCacheActive {
  5742  		r.graphCache.Lock()
  5743  		defer r.graphCache.Unlock()
  5744  
  5745  		if r.describeGraphResp != nil {
  5746  			return r.describeGraphResp, nil
  5747  		}
  5748  	}
  5749  
  5750  	// Obtain the pointer to the global singleton channel graph, this will
  5751  	// provide a consistent view of the graph due to bbolt db's
  5752  	// transactional model.
  5753  	graph := r.server.graphDB
  5754  
  5755  	// First iterate through all the known nodes (connected or unconnected
  5756  	// within the graph), collating their current state into the RPC
  5757  	// response.
  5758  	err := graph.ForEachNode(func(_ kvdb.RTx, node *channeldb.LightningNode) error {
  5759  		nodeAddrs := make([]*lnrpc.NodeAddress, 0)
  5760  		for _, addr := range node.Addresses {
  5761  			nodeAddr := &lnrpc.NodeAddress{
  5762  				Network: addr.Network(),
  5763  				Addr:    addr.String(),
  5764  			}
  5765  			nodeAddrs = append(nodeAddrs, nodeAddr)
  5766  		}
  5767  
  5768  		lnNode := &lnrpc.LightningNode{
  5769  			LastUpdate: uint32(node.LastUpdate.Unix()),
  5770  			PubKey:     hex.EncodeToString(node.PubKeyBytes[:]),
  5771  			Addresses:  nodeAddrs,
  5772  			Alias:      node.Alias,
  5773  			Color:      routing.EncodeHexColor(node.Color),
  5774  			Features:   invoicesrpc.CreateRPCFeatures(node.Features),
  5775  		}
  5776  
  5777  		resp.Nodes = append(resp.Nodes, lnNode)
  5778  
  5779  		return nil
  5780  	})
  5781  	if err != nil {
  5782  		return nil, err
  5783  	}
  5784  
  5785  	// Next, for each active channel we know of within the graph, create a
  5786  	// similar response which details both the edge information as well as
  5787  	// the routing policies of the nodes connecting the two edges.
  5788  	err = graph.ForEachChannel(func(edgeInfo *channeldb.ChannelEdgeInfo,
  5789  		c1, c2 *channeldb.ChannelEdgePolicy) error {
  5790  
  5791  		// Do not include unannounced channels unless specifically
  5792  		// requested. Unannounced channels include both private channels as
  5793  		// well as public channels whose authentication proofs were not
  5794  		// confirmed yet, hence were not announced.
  5795  		if !includeUnannounced && edgeInfo.AuthProof == nil {
  5796  			return nil
  5797  		}
  5798  
  5799  		edge := marshalDbEdge(edgeInfo, c1, c2)
  5800  		resp.Edges = append(resp.Edges, edge)
  5801  
  5802  		return nil
  5803  	})
  5804  	if err != nil && err != channeldb.ErrGraphNoEdgesFound {
  5805  		return nil, err
  5806  	}
  5807  
  5808  	// We still have the mutex held, so we can safely populate the cache
  5809  	// now to save on GC churn for this query, but only if the cache isn't
  5810  	// disabled.
  5811  	if graphCacheActive {
  5812  		r.describeGraphResp = resp
  5813  	}
  5814  
  5815  	return resp, nil
  5816  }
  5817  
  5818  func marshalDbEdge(edgeInfo *channeldb.ChannelEdgeInfo,
  5819  	c1, c2 *channeldb.ChannelEdgePolicy) *lnrpc.ChannelEdge {
  5820  
  5821  	// Make sure the policies match the node they belong to. c1 should point
  5822  	// to the policy for NodeKey1, and c2 for NodeKey2.
  5823  	if c1 != nil && c1.ChannelFlags&lnwire.ChanUpdateDirection == 1 ||
  5824  		c2 != nil && c2.ChannelFlags&lnwire.ChanUpdateDirection == 0 {
  5825  
  5826  		c2, c1 = c1, c2
  5827  	}
  5828  
  5829  	var lastUpdate int64
  5830  	if c1 != nil {
  5831  		lastUpdate = c1.LastUpdate.Unix()
  5832  	}
  5833  	if c2 != nil && c2.LastUpdate.Unix() > lastUpdate {
  5834  		lastUpdate = c2.LastUpdate.Unix()
  5835  	}
  5836  
  5837  	edge := &lnrpc.ChannelEdge{
  5838  		ChannelId: edgeInfo.ChannelID,
  5839  		ChanPoint: edgeInfo.ChannelPoint.String(),
  5840  		// TODO(roasbeef): update should be on edge info itself
  5841  		LastUpdate: uint32(lastUpdate),
  5842  		Node1Pub:   hex.EncodeToString(edgeInfo.NodeKey1Bytes[:]),
  5843  		Node2Pub:   hex.EncodeToString(edgeInfo.NodeKey2Bytes[:]),
  5844  		Capacity:   int64(edgeInfo.Capacity),
  5845  	}
  5846  
  5847  	if c1 != nil {
  5848  		edge.Node1Policy = &lnrpc.RoutingPolicy{
  5849  			TimeLockDelta:      uint32(c1.TimeLockDelta),
  5850  			MinHtlc:            int64(c1.MinHTLC),
  5851  			MaxHtlcMAtoms:      uint64(c1.MaxHTLC),
  5852  			FeeBaseMAtoms:      int64(c1.FeeBaseMAtoms),
  5853  			FeeRateMilliMAtoms: int64(c1.FeeProportionalMillionths),
  5854  			Disabled:           c1.ChannelFlags&lnwire.ChanUpdateDisabled != 0,
  5855  			LastUpdate:         uint32(c1.LastUpdate.Unix()),
  5856  		}
  5857  	}
  5858  
  5859  	if c2 != nil {
  5860  		edge.Node2Policy = &lnrpc.RoutingPolicy{
  5861  			TimeLockDelta:      uint32(c2.TimeLockDelta),
  5862  			MinHtlc:            int64(c2.MinHTLC),
  5863  			MaxHtlcMAtoms:      uint64(c2.MaxHTLC),
  5864  			FeeBaseMAtoms:      int64(c2.FeeBaseMAtoms),
  5865  			FeeRateMilliMAtoms: int64(c2.FeeProportionalMillionths),
  5866  			Disabled:           c2.ChannelFlags&lnwire.ChanUpdateDisabled != 0,
  5867  			LastUpdate:         uint32(c2.LastUpdate.Unix()),
  5868  		}
  5869  	}
  5870  
  5871  	return edge
  5872  }
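
// As the swap above implies, a policy whose ChanUpdateDirection bit is unset
// is treated as belonging to NodeKey1 and one whose bit is set as belonging
// to NodeKey2. For example, if only a single policy with the direction bit
// set is known, it ends up as Node2Policy and Node1Policy is left nil.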
  5873  
  5874  // GetNodeMetrics returns all available node metrics calculated from the
  5875  // current channel graph.
  5876  func (r *rpcServer) GetNodeMetrics(ctx context.Context,
  5877  	req *lnrpc.NodeMetricsRequest) (*lnrpc.NodeMetricsResponse, error) {
  5878  
  5879  	// Get requested metric types.
  5880  	getCentrality := false
  5881  	for _, t := range req.Types {
  5882  		if t == lnrpc.NodeMetricType_BETWEENNESS_CENTRALITY {
  5883  			getCentrality = true
  5884  		}
  5885  	}
  5886  
  5887  	// Only centrality can be requested for now.
  5888  	if !getCentrality {
  5889  		return nil, nil
  5890  	}
  5891  
  5892  	resp := &lnrpc.NodeMetricsResponse{
  5893  		BetweennessCentrality: make(map[string]*lnrpc.FloatMetric),
  5894  	}
  5895  
  5896  	// Obtain the pointer to the global singleton channel graph, this will
  5897  	// provide a consistent view of the graph due to bolt db's
  5898  	// transactional model.
  5899  	graph := r.server.graphDB
  5900  
  5901  	// Calculate betweenness centrality if requested. Note that depending on the
  5902  	// graph size, this may take up to a few minutes.
  5903  	channelGraph := autopilot.ChannelGraphFromDatabase(graph)
  5904  	centralityMetric, err := autopilot.NewBetweennessCentralityMetric(
  5905  		runtime.NumCPU(),
  5906  	)
  5907  	if err != nil {
  5908  		return nil, err
  5909  	}
  5910  	if err := centralityMetric.Refresh(channelGraph); err != nil {
  5911  		return nil, err
  5912  	}
  5913  
  5914  	// Fill normalized and non-normalized centrality.
  5915  	centrality := centralityMetric.GetMetric(true)
  5916  	for nodeID, val := range centrality {
  5917  		resp.BetweennessCentrality[hex.EncodeToString(nodeID[:])] =
  5918  			&lnrpc.FloatMetric{
  5919  				NormalizedValue: val,
  5920  			}
  5921  	}
  5922  
  5923  	centrality = centralityMetric.GetMetric(false)
  5924  	for nodeID, val := range centrality {
  5925  		resp.BetweennessCentrality[hex.EncodeToString(nodeID[:])].Value = val
  5926  	}
  5927  
  5928  	return resp, nil
  5929  }
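
        // Example (illustrative sketch, client side): requesting betweenness
        // centrality. The ctx and client values are hypothetical, with client
        // being an established lnrpc.LightningClient:
        //
        //	resp, err := client.GetNodeMetrics(ctx, &lnrpc.NodeMetricsRequest{
        //		Types: []lnrpc.NodeMetricType{
        //			lnrpc.NodeMetricType_BETWEENNESS_CENTRALITY,
        //		},
        //	})
        //	if err != nil { /* handle error */ }
        //	for pub, metric := range resp.BetweennessCentrality {
        //		fmt.Printf("%s: %v (normalized %v)\n",
        //			pub, metric.Value, metric.NormalizedValue)
        //	}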
  5930  
  5931  // GetChanInfo returns the latest authenticated network announcement for the
  5932  // given channel identified by its channel ID: an 8-byte integer which uniquely
  5933  // identifies the location of the transaction's funding output within the
  5934  // blockchain.
  5935  func (r *rpcServer) GetChanInfo(ctx context.Context,
  5936  	in *lnrpc.ChanInfoRequest) (*lnrpc.ChannelEdge, error) {
  5937  
  5938  	graph := r.server.graphDB
  5939  
  5940  	edgeInfo, edge1, edge2, err := graph.FetchChannelEdgesByID(in.ChanId)
  5941  	if err != nil {
  5942  		return nil, err
  5943  	}
  5944  
  5945  	// Convert the database's edge format into the network/RPC edge format
  5946  	// which couples the edge itself along with the directional node
  5947  	// routing policies of each node involved within the channel.
  5948  	channelEdge := marshalDbEdge(edgeInfo, edge1, edge2)
  5949  
  5950  	return channelEdge, nil
  5951  }
  5952  
  5953  // GetNodeInfo returns the latest advertised and aggregate authenticated
  5954  // channel information for the specified node identified by its public key.
  5955  func (r *rpcServer) GetNodeInfo(ctx context.Context,
  5956  	in *lnrpc.NodeInfoRequest) (*lnrpc.NodeInfo, error) {
  5957  
  5958  	graph := r.server.graphDB
  5959  
  5960  	// First, parse the hex-encoded public key into a full in-memory public
  5961  	// key object we can work with for querying.
  5962  	pubKey, err := route.NewVertexFromStr(in.PubKey)
  5963  	if err != nil {
  5964  		return nil, err
  5965  	}
  5966  
  5967  	// With the public key decoded, attempt to fetch the node corresponding
  5968  	// to this public key. If the node cannot be found, then an error will
  5969  	// be returned.
  5970  	node, err := graph.FetchLightningNode(pubKey)
  5971  	switch {
  5972  	case err == channeldb.ErrGraphNodeNotFound:
  5973  		return nil, status.Error(codes.NotFound, err.Error())
  5974  	case err != nil:
  5975  		return nil, err
  5976  	}
  5977  
  5978  	// With the node obtained, we'll now iterate through all its outgoing
  5979  	// edges to gather some basic statistics about its outgoing channels.
  5980  	var (
  5981  		numChannels   uint32
  5982  		totalCapacity dcrutil.Amount
  5983  		channels      []*lnrpc.ChannelEdge
  5984  	)
  5985  
  5986  	if err := node.ForEachChannel(nil, func(_ kvdb.RTx,
  5987  		edge *channeldb.ChannelEdgeInfo,
  5988  		c1, c2 *channeldb.ChannelEdgePolicy) error {
  5989  
  5990  		numChannels++
  5991  		totalCapacity += edge.Capacity
  5992  
  5993  		// Only populate the node's channels if the user requested them.
  5994  		if in.IncludeChannels {
  5995  			// Do not include unannounced channels - private
  5996  			// channels or public channels whose authentication
  5997  			// proofs have not been confirmed yet.
  5998  			if edge.AuthProof == nil {
  5999  				return nil
  6000  			}
  6001  
  6002  			// Convert the database's edge format into the
  6003  			// network/RPC edge format.
  6004  			channelEdge := marshalDbEdge(edge, c1, c2)
  6005  			channels = append(channels, channelEdge)
  6006  		}
  6007  
  6008  		return nil
  6009  	}); err != nil {
  6010  		return nil, err
  6011  	}
  6012  
  6013  	nodeAddrs := make([]*lnrpc.NodeAddress, 0)
  6014  	for _, addr := range node.Addresses {
  6015  		nodeAddr := &lnrpc.NodeAddress{
  6016  			Network: addr.Network(),
  6017  			Addr:    addr.String(),
  6018  		}
  6019  		nodeAddrs = append(nodeAddrs, nodeAddr)
  6020  	}
  6021  
  6022  	features := invoicesrpc.CreateRPCFeatures(node.Features)
  6023  
  6024  	return &lnrpc.NodeInfo{
  6025  		Node: &lnrpc.LightningNode{
  6026  			LastUpdate: uint32(node.LastUpdate.Unix()),
  6027  			PubKey:     in.PubKey,
  6028  			Addresses:  nodeAddrs,
  6029  			Alias:      node.Alias,
  6030  			Color:      routing.EncodeHexColor(node.Color),
  6031  			Features:   features,
  6032  		},
  6033  		NumChannels:   numChannels,
  6034  		TotalCapacity: int64(totalCapacity),
  6035  		Channels:      channels,
  6036  	}, nil
  6037  }
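
        // Example (illustrative sketch, client side): fetching a node along
        // with its channels. The ctx, client and pubKeyHex values are
        // hypothetical:
        //
        //	info, err := client.GetNodeInfo(ctx, &lnrpc.NodeInfoRequest{
        //		PubKey:          pubKeyHex,
        //		IncludeChannels: true,
        //	})
        //	if err != nil { /* handle error */ }
        //	fmt.Printf("%d channels, %d atoms total capacity\n",
        //		info.NumChannels, info.TotalCapacity)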
  6038  
  6039  // EnforceNodePing attempts to ping the specified peer. If it doesn't respond
  6040  // before this function's context is canceled, then the peer is forced to
  6041  // disconnect.
  6042  func (r *rpcServer) EnforceNodePing(ctx context.Context,
  6043  	in *lnrpc.EnforceNodePingRequest) (*lnrpc.EnforceNodePingResponse, error) {
  6044  
  6045  	pubKeyBytes, err := hex.DecodeString(in.PubKey)
  6046  	if err != nil {
  6047  		return nil, fmt.Errorf("unable to decode pubkey bytes: %v", err)
  6048  	}
  6049  	p, err := r.server.FindPeerByPubStr(string(pubKeyBytes))
  6050  	if err != nil {
  6051  		return nil, err
  6052  	}
  6053  
  6054  	pingTime, err := p.EnforcePing(ctx)
  6055  	if err != nil {
  6056  		return nil, err
  6057  	}
  6058  
  6059  	return &lnrpc.EnforceNodePingResponse{
  6060  		PingTimeMicro: pingTime.Microseconds(),
  6061  	}, nil
  6062  }
  6063  
  6064  // QueryRoutes attempts to query the daemon's Channel Router for a possible
  6065  // route to a target destination capable of carrying a specific amount of
  6066  // atoms within the route's flow. The returned route contains the full
  6067  // details required to craft and send an HTLC, also including the necessary
  6068  // information that should be present within the Sphinx packet encapsulated
  6069  // within the HTLC.
  6070  //
  6071  // TODO(roasbeef): should return a slice of routes in reality
  6072  //   - create separate PR to send based on well formatted route
  6073  func (r *rpcServer) QueryRoutes(ctx context.Context,
  6074  	in *lnrpc.QueryRoutesRequest) (*lnrpc.QueryRoutesResponse, error) {
  6075  
  6076  	return r.routerBackend.QueryRoutes(ctx, in)
  6077  }
  6078  
  6079  // GetNetworkInfo returns some basic stats about the known channel graph from
  6080  // the PoV of the node.
  6081  func (r *rpcServer) GetNetworkInfo(ctx context.Context,
  6082  	_ *lnrpc.NetworkInfoRequest) (*lnrpc.NetworkInfo, error) {
  6083  
  6084  	graph := r.server.graphDB
  6085  
  6086  	var (
  6087  		numNodes             uint32
  6088  		numChannels          uint32
  6089  		maxChanOut           uint32
  6090  		totalNetworkCapacity dcrutil.Amount
  6091  		minChannelSize       dcrutil.Amount = math.MaxInt64
  6092  		maxChannelSize       dcrutil.Amount
  6093  		medianChanSize       dcrutil.Amount
  6094  	)
  6095  
  6096  	// We'll use this map to de-duplicate channels during our traversal.
  6097  	// This is needed since channels are directional, so there will be two
  6098  	// edges for each channel within the graph.
  6099  	seenChans := make(map[uint64]struct{})
  6100  
  6101  	// We also keep a list of all encountered capacities, in order to
  6102  	// calculate the median channel size.
  6103  	var allChans []dcrutil.Amount
  6104  
  6105  	// We'll run through all the known nodes within our view of the
  6106  	// network, tallying up the total number of nodes, and also gathering
  6107  	// each node so we can measure the graph diameter and degree stats
  6108  	// below.
  6109  	err := graph.ForEachNodeCached(func(node route.Vertex,
  6110  		edges map[uint64]*channeldb.DirectedChannel) error {
  6111  
  6112  		// Increment the total number of nodes with each iteration.
  6113  		numNodes++
  6114  
  6115  		// For each channel we'll compute the out degree of each node,
  6116  		// and also update our running tallies of the min/max channel
  6117  		// capacity, as well as the total channel capacity.
  6120  		var outDegree uint32
  6121  		for _, edge := range edges {
  6122  			// Bump up the out degree for this node for each
  6123  			// channel encountered.
  6124  			outDegree++
  6125  
  6126  			// If we've already seen this channel, then we'll
  6127  			// return early to ensure that we don't double-count
  6128  			// stats.
  6129  			if _, ok := seenChans[edge.ChannelID]; ok {
  6130  				return nil
  6131  			}
  6132  
  6133  			// Compare the capacity of this channel against the
  6134  			// running min/max to see if we should update the
  6135  			// extrema.
  6136  			chanCapacity := edge.Capacity
  6137  			if chanCapacity < minChannelSize {
  6138  				minChannelSize = chanCapacity
  6139  			}
  6140  			if chanCapacity > maxChannelSize {
  6141  				maxChannelSize = chanCapacity
  6142  			}
  6143  
  6144  			// Accumulate the total capacity of this channel to the
  6145  			// network wide-capacity.
  6146  			totalNetworkCapacity += chanCapacity
  6147  
  6148  			numChannels++
  6149  
  6150  			seenChans[edge.ChannelID] = struct{}{}
  6151  			allChans = append(allChans, edge.Capacity)
  6152  		}
  6153  
  6154  		// Finally, if the out degree of this node is greater than what
  6155  		// we've seen so far, update the maxChanOut variable.
  6156  		if outDegree > maxChanOut {
  6157  			maxChanOut = outDegree
  6158  		}
  6159  
  6160  		return nil
  6161  	})
  6162  	if err != nil {
  6163  		return nil, err
  6164  	}
  6165  
  6166  	// Query the graph for the current number of zombie channels.
  6167  	numZombies, err := graph.NumZombies()
  6168  	if err != nil {
  6169  		return nil, err
  6170  	}
  6171  
  6172  	// Find the median.
  6173  	medianChanSize = autopilot.Median(allChans)
  6174  
  6175  	// If we don't have any channels, then reset the minChannelSize to zero
  6176  	// to avoid outputting NaN in encoded JSON.
  6177  	if numChannels == 0 {
  6178  		minChannelSize = 0
  6179  	}
  6180  
  6181  	// TODO(roasbeef): graph diameter
  6182  
  6183  	// TODO(roasbeef): also add oldest channel?
  6184  	netInfo := &lnrpc.NetworkInfo{
  6185  		MaxOutDegree:         maxChanOut,
  6186  		AvgOutDegree:         float64(2*numChannels) / float64(numNodes),
  6187  		NumNodes:             numNodes,
  6188  		NumChannels:          numChannels,
  6189  		TotalNetworkCapacity: int64(totalNetworkCapacity),
  6190  		AvgChannelSize:       float64(totalNetworkCapacity) / float64(numChannels),
  6191  
  6192  		MinChannelSize:       int64(minChannelSize),
  6193  		MaxChannelSize:       int64(maxChannelSize),
  6194  		MedianChannelSizeSat: int64(medianChanSize),
  6195  		NumZombieChans:       numZombies,
  6196  	}
  6197  
  6198  	// Similarly, if we don't have any channels, then we'll also set the
  6199  	// average channel size to zero in order to avoid weird JSON encoding
  6200  	// outputs.
  6201  	if numChannels == 0 {
  6202  		netInfo.AvgChannelSize = 0
  6203  	}
  6204  
  6205  	return netInfo, nil
  6206  }
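
        // Worked example (hypothetical numbers): with numNodes = 4 and
        // numChannels = 3, every channel contributes an endpoint to two
        // nodes, so the reported averages would be:
        //
        //	AvgOutDegree   = 2 * 3 / 4 = 1.5
        //	AvgChannelSize = totalNetworkCapacity / 3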
  6207  
  6208  // StopDaemon will send a shutdown request to the interrupt handler, triggering
  6209  // a graceful shutdown of the daemon.
  6210  func (r *rpcServer) StopDaemon(ctx context.Context,
  6211  	_ *lnrpc.StopRequest) (*lnrpc.StopResponse, error) {
  6212  	r.interceptor.RequestShutdown()
  6213  	return &lnrpc.StopResponse{}, nil
  6214  }
  6215  
  6216  // SubscribeChannelGraph launches a streaming RPC that allows the caller to
  6217  // receive notifications upon any changes to the channel graph topology from
  6218  // the point of view of the responding node. Events notified include: new nodes coming
  6219  // online, nodes updating their authenticated attributes, new channels being
  6220  // advertised, updates in the routing policy for a directional channel edge,
  6221  // and finally when prior channels are closed on-chain.
  6222  func (r *rpcServer) SubscribeChannelGraph(req *lnrpc.GraphTopologySubscription,
  6223  	updateStream lnrpc.Lightning_SubscribeChannelGraphServer) error {
  6224  
  6225  	// First, we start by subscribing to a new intent to receive
  6226  	// notifications from the channel router.
  6227  	client, err := r.server.chanRouter.SubscribeTopology()
  6228  	if err != nil {
  6229  		return err
  6230  	}
  6231  
  6232  	// Ensure that the resources for the topology update client are cleaned
  6233  	// up once either the server or the client exits.
  6234  	defer client.Cancel()
  6235  
  6236  	for {
  6237  		select {
  6238  
  6239  		// A new update has been sent by the channel router, we'll
  6240  		// marshal it into the form expected by the gRPC client, then
  6241  		// send it off.
  6242  		case topChange, ok := <-client.TopologyChanges:
  6243  			// If the second value from the channel read is false,
  6244  			// then this means that the channel router is exiting
  6245  			// or the notification client was canceled. So we'll
  6246  			// exit early.
  6247  			if !ok {
  6248  				return errors.New("server shutting down")
  6249  			}
  6250  
  6251  			// Convert the struct from the channel router into the
  6252  			// form expected by the gRPC service then send it off
  6253  			// to the client.
  6254  			graphUpdate := marshallTopologyChange(topChange)
  6255  			if err := updateStream.Send(graphUpdate); err != nil {
  6256  				return err
  6257  			}
  6258  
  6259  		// The response stream's context for whatever reason has been
  6260  		// closed. If context is closed by an exceeded deadline
  6261  		// we will return an error.
  6262  		case <-updateStream.Context().Done():
  6263  			if errors.Is(updateStream.Context().Err(), context.Canceled) {
  6264  				return nil
  6265  			}
  6266  			return updateStream.Context().Err()
  6267  
  6268  		// The server is quitting, so we'll exit immediately. Returning
  6269  		// nil will close the client's read end of the stream.
  6270  		case <-r.quit:
  6271  			return nil
  6272  		}
  6273  	}
  6274  }
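
        // Example (illustrative sketch, client side): consuming the topology
        // stream. The ctx and client values are hypothetical:
        //
        //	stream, err := client.SubscribeChannelGraph(
        //		ctx, &lnrpc.GraphTopologySubscription{},
        //	)
        //	if err != nil { /* handle error */ }
        //	for {
        //		update, err := stream.Recv()
        //		if err != nil {
        //			break // stream closed or canceled
        //		}
        //		fmt.Printf("%d node updates, %d channel updates\n",
        //			len(update.NodeUpdates), len(update.ChannelUpdates))
        //	}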
  6275  
  6276  // marshallTopologyChange performs a mapping from the topology change struct
  6277  // returned by the router to the form of notifications expected by the current
  6278  // gRPC service.
  6279  func marshallTopologyChange(topChange *routing.TopologyChange) *lnrpc.GraphTopologyUpdate {
  6280  
  6281  	// encodeKey is a simple helper function that converts a live public
  6282  	// key into a hex-encoded version of the compressed serialization for
  6283  	// the public key.
  6284  	encodeKey := func(k *secp256k1.PublicKey) string {
  6285  		return hex.EncodeToString(k.SerializeCompressed())
  6286  	}
  6287  
  6288  	nodeUpdates := make([]*lnrpc.NodeUpdate, len(topChange.NodeUpdates))
  6289  	for i, nodeUpdate := range topChange.NodeUpdates {
  6290  		nodeAddrs := make([]*lnrpc.NodeAddress, 0, len(nodeUpdate.Addresses))
  6291  		for _, addr := range nodeUpdate.Addresses {
  6292  			nodeAddr := &lnrpc.NodeAddress{
  6293  				Network: addr.Network(),
  6294  				Addr:    addr.String(),
  6295  			}
  6296  			nodeAddrs = append(nodeAddrs, nodeAddr)
  6297  		}
  6298  
  6299  		addrs := make([]string, len(nodeUpdate.Addresses))
  6300  		for i, addr := range nodeUpdate.Addresses {
  6301  			addrs[i] = addr.String()
  6302  		}
  6303  
  6304  		nodeUpdates[i] = &lnrpc.NodeUpdate{
  6305  			Addresses:     addrs,
  6306  			NodeAddresses: nodeAddrs,
  6307  			IdentityKey:   encodeKey(nodeUpdate.IdentityKey),
  6308  			Alias:         nodeUpdate.Alias,
  6309  			Color:         nodeUpdate.Color,
  6310  			Features: invoicesrpc.CreateRPCFeatures(
  6311  				nodeUpdate.Features,
  6312  			),
  6313  		}
  6314  	}
  6315  
  6316  	channelUpdates := make([]*lnrpc.ChannelEdgeUpdate, len(topChange.ChannelEdgeUpdates))
  6317  	for i, channelUpdate := range topChange.ChannelEdgeUpdates {
  6318  		channelUpdates[i] = &lnrpc.ChannelEdgeUpdate{
  6319  			ChanId: channelUpdate.ChanID,
  6320  			ChanPoint: &lnrpc.ChannelPoint{
  6321  				FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
  6322  					FundingTxidBytes: channelUpdate.ChanPoint.Hash[:],
  6323  				},
  6324  				OutputIndex: channelUpdate.ChanPoint.Index,
  6325  			},
  6326  			Capacity: int64(channelUpdate.Capacity),
  6327  			RoutingPolicy: &lnrpc.RoutingPolicy{
  6328  				TimeLockDelta:      uint32(channelUpdate.TimeLockDelta),
  6329  				MinHtlc:            int64(channelUpdate.MinHTLC),
  6330  				MaxHtlcMAtoms:      uint64(channelUpdate.MaxHTLC),
  6331  				FeeBaseMAtoms:      int64(channelUpdate.BaseFee),
  6332  				FeeRateMilliMAtoms: int64(channelUpdate.FeeRate),
  6333  				Disabled:           channelUpdate.Disabled,
  6334  			},
  6335  			AdvertisingNode: encodeKey(channelUpdate.AdvertisingNode),
  6336  			ConnectingNode:  encodeKey(channelUpdate.ConnectingNode),
  6337  		}
  6338  	}
  6339  
  6340  	closedChans := make([]*lnrpc.ClosedChannelUpdate, len(topChange.ClosedChannels))
  6341  	for i, closedChan := range topChange.ClosedChannels {
  6342  		closedChans[i] = &lnrpc.ClosedChannelUpdate{
  6343  			ChanId:       closedChan.ChanID,
  6344  			Capacity:     int64(closedChan.Capacity),
  6345  			ClosedHeight: closedChan.ClosedHeight,
  6346  			ChanPoint: &lnrpc.ChannelPoint{
  6347  				FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
  6348  					FundingTxidBytes: closedChan.ChanPoint.Hash[:],
  6349  				},
  6350  				OutputIndex: closedChan.ChanPoint.Index,
  6351  			},
  6352  		}
  6353  	}
  6354  
  6355  	return &lnrpc.GraphTopologyUpdate{
  6356  		NodeUpdates:    nodeUpdates,
  6357  		ChannelUpdates: channelUpdates,
  6358  		ClosedChans:    closedChans,
  6359  	}
  6360  }
  6361  
  6362  // ListPayments returns a list of outgoing payments determined by a paginated
  6363  // database query.
  6364  func (r *rpcServer) ListPayments(ctx context.Context,
  6365  	req *lnrpc.ListPaymentsRequest) (*lnrpc.ListPaymentsResponse, error) {
  6366  
  6367  	rpcsLog.Debugf("[ListPayments]")
  6368  
  6369  	query := channeldb.PaymentsQuery{
  6370  		IndexOffset:       req.IndexOffset,
  6371  		MaxPayments:       req.MaxPayments,
  6372  		Reversed:          req.Reversed,
  6373  		IncludeIncomplete: req.IncludeIncomplete,
  6374  	}
  6375  
  6376  	// If the maximum number of payments wasn't specified, then we'll
  6377  	// default to returning the maximum number of payments representable.
  6378  	if req.MaxPayments == 0 {
  6379  		query.MaxPayments = math.MaxUint64
  6380  	}
  6381  
  6382  	paymentsQuerySlice, err := r.server.miscDB.QueryPayments(query)
  6383  	if err != nil {
  6384  		return nil, err
  6385  	}
  6386  
  6387  	paymentsResp := &lnrpc.ListPaymentsResponse{
  6388  		LastIndexOffset:  paymentsQuerySlice.LastIndexOffset,
  6389  		FirstIndexOffset: paymentsQuerySlice.FirstIndexOffset,
  6390  	}
  6391  
  6392  	for _, payment := range paymentsQuerySlice.Payments {
  6393  		payment := payment
  6394  
  6395  		rpcPayment, err := r.routerBackend.MarshallPayment(payment)
  6396  		if err != nil {
  6397  			return nil, err
  6398  		}
  6399  
  6400  		paymentsResp.Payments = append(
  6401  			paymentsResp.Payments, rpcPayment,
  6402  		)
  6403  	}
  6404  
  6405  	return paymentsResp, nil
  6406  }
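
        // Example (illustrative sketch, client side): paginating payments
        // newest-first. The ctx and client values are hypothetical:
        //
        //	req := &lnrpc.ListPaymentsRequest{
        //		MaxPayments: 100,
        //		Reversed:    true, // newest payments first
        //	}
        //	resp, err := client.ListPayments(ctx, req)
        //	if err != nil { /* handle error */ }
        //	// Resume the next (older) page from where this one ended.
        //	req.IndexOffset = resp.FirstIndexOffset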
  6407  
  6408  // DeletePayment deletes a payment from the DB given its payment hash. If
  6409  // failedHtlcsOnly is set, only failed HTLC attempts of the payment will be
  6410  // deleted.
  6411  func (r *rpcServer) DeletePayment(ctx context.Context,
  6412  	req *lnrpc.DeletePaymentRequest) (
  6413  	*lnrpc.DeletePaymentResponse, error) {
  6414  
  6415  	hash, err := lntypes.MakeHash(req.PaymentHash)
  6416  	if err != nil {
  6417  		return nil, err
  6418  	}
  6419  
  6420  	rpcsLog.Infof("[DeletePayment] payment_identifier=%v, "+
  6421  		"failed_htlcs_only=%v", hash, req.FailedHtlcsOnly)
  6422  
  6423  	err = r.server.miscDB.DeletePayment(hash, req.FailedHtlcsOnly)
  6424  	if err != nil {
  6425  		return nil, err
  6426  	}
  6427  
  6428  	return &lnrpc.DeletePaymentResponse{}, nil
  6429  }
  6430  
  6431  // DeleteAllPayments deletes all outgoing payments from DB.
  6432  func (r *rpcServer) DeleteAllPayments(ctx context.Context,
  6433  	req *lnrpc.DeleteAllPaymentsRequest) (
  6434  	*lnrpc.DeleteAllPaymentsResponse, error) {
  6435  
  6436  	rpcsLog.Infof("[DeleteAllPayments] failed_payments_only=%v, "+
  6437  		"failed_htlcs_only=%v", req.FailedPaymentsOnly,
  6438  		req.FailedHtlcsOnly)
  6439  
  6440  	err := r.server.miscDB.DeletePayments(
  6441  		req.FailedPaymentsOnly, req.FailedHtlcsOnly,
  6442  	)
  6443  	if err != nil {
  6444  		return nil, err
  6445  	}
  6446  
  6447  	return &lnrpc.DeleteAllPaymentsResponse{}, nil
  6448  }
  6449  
  6450  // DebugLevel allows a caller to programmatically set the logging verbosity of
  6451  // lnd. The logging can be targeted according to a coarse daemon-wide logging
  6452  // level, or in a granular fashion to specify the logging for a target
  6453  // sub-system.
  6454  func (r *rpcServer) DebugLevel(ctx context.Context,
  6455  	req *lnrpc.DebugLevelRequest) (*lnrpc.DebugLevelResponse, error) {
  6456  
  6457  	// If show is set, then we simply print out the list of available
  6458  	// sub-systems.
  6459  	if req.Show {
  6460  		return &lnrpc.DebugLevelResponse{
  6461  			SubSystems: strings.Join(
  6462  				r.cfg.LogWriter.SupportedSubsystems(), " ",
  6463  			),
  6464  		}, nil
  6465  	}
  6466  
  6467  	rpcsLog.Infof("[debuglevel] changing debug level to: %v", req.LevelSpec)
  6468  
  6469  	// Otherwise, we'll attempt to set the logging level using the
  6470  	// specified level spec.
  6471  	err := build.ParseAndSetDebugLevels(req.LevelSpec, r.cfg.LogWriter)
  6472  	if err != nil {
  6473  		return nil, err
  6474  	}
  6475  
  6476  	return &lnrpc.DebugLevelResponse{}, nil
  6477  }
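
        // Example (illustrative sketch, client side): level specs are either a
        // single global level or comma-separated subsystem=level pairs. The
        // ctx and client values, and the PEER subsystem name, are hypothetical:
        //
        //	// Set every subsystem to debug.
        //	_, _ = client.DebugLevel(ctx, &lnrpc.DebugLevelRequest{
        //		LevelSpec: "debug",
        //	})
        //	// Raise a single subsystem to trace.
        //	_, _ = client.DebugLevel(ctx, &lnrpc.DebugLevelRequest{
        //		LevelSpec: "PEER=trace",
        //	})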
  6478  
  6479  func (r *rpcServer) CalcPaymentStats(ctx context.Context,
  6480  	req *lnrpc.CalcPaymentStatsRequest) (*lnrpc.CalcPaymentStatsResponse, error) {
  6481  
  6482  	stats, err := r.server.miscDB.CalcPaymentStats()
  6483  	if err != nil {
  6484  		return nil, err
  6485  	}
  6486  	return &lnrpc.CalcPaymentStatsResponse{
  6487  		Total:     stats.Total,
  6488  		Failed:    stats.Failed,
  6489  		Succeeded: stats.Succeeded,
  6490  
  6491  		HtlcAttempts: stats.HTLCAttempts,
  6492  		HtlcFailed:   stats.HTLCFailed,
  6493  		HtlcSettled:  stats.HTLCSettled,
  6494  
  6495  		OldDupePayments: stats.OldDupePayments,
  6496  	}, nil
  6497  }
  6498  
  6499  // DecodePayReq takes an encoded payment request string and attempts to decode
  6500  // it, returning a full description of the conditions encoded within the
  6501  // payment request.
  6502  func (r *rpcServer) DecodePayReq(ctx context.Context,
  6503  	req *lnrpc.PayReqString) (*lnrpc.PayReq, error) {
  6504  
  6505  	rpcsLog.Tracef("[decodepayreq] decoding: %v", req.PayReq)
  6506  
  6507  	// First we'll attempt to decode the payment request string. If the
  6508  	// request is invalid or the checksum doesn't match, then we'll exit
  6509  	// here with an error.
  6510  	payReq, err := zpay32.Decode(req.PayReq, r.cfg.ActiveNetParams.Params)
  6511  	if err != nil {
  6512  		return nil, err
  6513  	}
  6514  
  6515  	// Let the fields default to empty strings.
  6516  	desc := ""
  6517  	if payReq.Description != nil {
  6518  		desc = *payReq.Description
  6519  	}
  6520  
  6521  	descHash := []byte("")
  6522  	if payReq.DescriptionHash != nil {
  6523  		descHash = payReq.DescriptionHash[:]
  6524  	}
  6525  
  6526  	fallbackAddr := ""
  6527  	if payReq.FallbackAddr != nil {
  6528  		fallbackAddr = payReq.FallbackAddr.String()
  6529  	}
  6530  
  6531  	// Expiry time will default to 3600 seconds if not specified
  6532  	// explicitly.
  6533  	expiry := int64(payReq.Expiry().Seconds())
  6534  
  6535  	// Convert between the `lnrpc` and `routing` types.
  6536  	routeHints := invoicesrpc.CreateRPCRouteHints(payReq.RouteHints)
  6537  
  6538  	var amtAtoms, amtMAtoms int64
  6539  	if payReq.MilliAt != nil {
  6540  		amtAtoms = int64(payReq.MilliAt.ToAtoms())
  6541  		amtMAtoms = int64(*payReq.MilliAt)
  6542  	}
  6543  
  6544  	// Extract the payment address from the payment request, if present.
  6545  	var paymentAddr []byte
  6546  	if payReq.PaymentAddr != nil {
  6547  		paymentAddr = payReq.PaymentAddr[:]
  6548  	}
  6549  
  6550  	dest := payReq.Destination.SerializeCompressed()
  6551  	return &lnrpc.PayReq{
  6552  		Destination:     hex.EncodeToString(dest),
  6553  		PaymentHash:     hex.EncodeToString(payReq.PaymentHash[:]),
  6554  		NumAtoms:        amtAtoms,
  6555  		NumMAtoms:       amtMAtoms,
  6556  		Timestamp:       payReq.Timestamp.Unix(),
  6557  		Description:     desc,
  6558  		DescriptionHash: hex.EncodeToString(descHash),
  6559  		FallbackAddr:    fallbackAddr,
  6560  		Expiry:          expiry,
  6561  		CltvExpiry:      int64(payReq.MinFinalCLTVExpiry()),
  6562  		RouteHints:      routeHints,
  6563  		PaymentAddr:     paymentAddr,
  6564  		Features:        invoicesrpc.CreateRPCFeatures(payReq.Features),
  6565  	}, nil
  6566  }
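
        // Example (illustrative sketch, client side): decoding an invoice. The
        // ctx, client and payReqStr values are hypothetical, with payReqStr
        // holding an encoded payment request:
        //
        //	decoded, err := client.DecodePayReq(ctx, &lnrpc.PayReqString{
        //		PayReq: payReqStr,
        //	})
        //	if err != nil { /* handle error */ }
        //	fmt.Printf("pay %d atoms to %s within %d seconds\n",
        //		decoded.NumAtoms, decoded.Destination, decoded.Expiry)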
  6567  
  6568  // feeBase is the fixed point that fee rate computations are performed over.
  6569  // Nodes on the network advertise their fee rate using this point as a base.
  6570  // This means that the minimal possible fee rate is 1e-6, or 0.000001, or
  6571  // 0.0001%.
  6572  const feeBase float64 = 1000000
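
        // Illustrative arithmetic (hypothetical values): an advertised
        // FeeProportionalMillionths of 2500 corresponds to a proportional fee
        // rate of 2500/feeBase = 0.0025, i.e. 0.25% of the forwarded amount,
        // charged on top of the base fee:
        //
        //	feeRate := float64(2500) / feeBase // 0.0025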
  6573  
  6574  // FeeReport allows the caller to obtain a report detailing the current fee
  6575  // schedule enforced by the node globally for each channel.
  6576  func (r *rpcServer) FeeReport(ctx context.Context,
  6577  	_ *lnrpc.FeeReportRequest) (*lnrpc.FeeReportResponse, error) {
  6578  
  6579  	// TODO(roasbeef): use UnaryInterceptor to add automated logging
  6580  
  6581  	rpcsLog.Debugf("[feereport]")
  6582  
  6583  	channelGraph := r.server.graphDB
  6584  	selfNode, err := channelGraph.SourceNode()
  6585  	if err != nil {
  6586  		return nil, err
  6587  	}
  6588  
  6589  	var feeReports []*lnrpc.ChannelFeeReport
  6590  	err = selfNode.ForEachChannel(nil, func(_ kvdb.RTx, chanInfo *channeldb.ChannelEdgeInfo,
  6591  		edgePolicy, _ *channeldb.ChannelEdgePolicy) error {
  6592  
  6593  		// Self node should always have policies for its channels.
  6594  		if edgePolicy == nil {
  6595  			return fmt.Errorf("no policy for outgoing channel %v ",
  6596  				chanInfo.ChannelID)
  6597  		}
  6598  
  6599  		// We'll compute the effective fee rate by converting from a
  6600  		// fixed point fee rate to a floating point fee rate. The fee
  6601  		// rate field in the database is the amount of milli-atoms charged per
  6602  		// 1 million milli-atoms sent, so we'll divide by this to get the proper fee
  6603  		// rate.
  6604  		feeRateFixedPoint := edgePolicy.FeeProportionalMillionths
  6605  		feeRate := float64(feeRateFixedPoint) / feeBase
  6606  
  6607  		// TODO(roasbeef): also add stats for revenue for each channel
  6608  		feeReports = append(feeReports, &lnrpc.ChannelFeeReport{
  6609  			ChanId:        chanInfo.ChannelID,
  6610  			ChannelPoint:  chanInfo.ChannelPoint.String(),
  6611  			BaseFeeMAtoms: int64(edgePolicy.FeeBaseMAtoms),
  6612  			FeePerMil:     int64(feeRateFixedPoint),
  6613  			FeeRate:       feeRate,
  6614  		})
  6615  
  6616  		return nil
  6617  	})
  6618  	if err != nil {
  6619  		return nil, err
  6620  	}
  6621  
  6622  	fwdEventLog := r.server.miscDB.ForwardingLog()
  6623  
  6624  	// computeFeeSum is a helper function that computes the total fees for
  6625  	// a particular time slice described by a forwarding event query.
  6626  	computeFeeSum := func(query channeldb.ForwardingEventQuery) (lnwire.MilliAtom, error) {
  6627  
  6628  		var totalFees lnwire.MilliAtom
  6629  
  6630  		// We'll continue to fetch the next query and accumulate the
  6631  		// fees until the next query returns no events.
  6632  		for {
  6633  			timeSlice, err := fwdEventLog.Query(query)
  6634  			if err != nil {
  6635  				return 0, err
  6636  			}
  6637  
  6638  			// If the timeslice is empty, then we'll return as
  6639  			// we've retrieved all the entries in this range.
  6640  			if len(timeSlice.ForwardingEvents) == 0 {
  6641  				break
  6642  			}
  6643  
  6644  			// Otherwise, we'll tally up and accumulate the total
  6645  			// fees for this time slice.
  6646  			for _, event := range timeSlice.ForwardingEvents {
  6647  				fee := event.AmtIn - event.AmtOut
  6648  				totalFees += fee
  6649  			}
  6650  
  6651  			// We'll now take the last offset index returned as
  6652  			// part of this response, and modify our query to start
  6653  			// at this index. This has a pagination effect in the
  6654  			// case that our query bounds have more than 100k
  6655  			// entries.
  6656  			query.IndexOffset = timeSlice.LastIndexOffset
  6657  		}
  6658  
  6659  		return totalFees, nil
  6660  	}
  6661  
  6662  	now := time.Now()
  6663  
  6664  	// Before we perform the queries below, we'll instruct the switch to
  6665  	// flush any pending events to disk. This ensures we get a complete
  6666  	// snapshot at this particular time.
  6667  	if err := r.server.htlcSwitch.FlushForwardingEvents(); err != nil {
  6668  		return nil, fmt.Errorf("unable to flush forwarding "+
  6669  			"events: %v", err)
  6670  	}
  6671  
  6672  	// In addition to returning the current fee schedule for each channel,
  6673  	// we'll also perform a series of queries to obtain the total fees
  6674  	// earned over the past day, week, and month.
  6675  	dayQuery := channeldb.ForwardingEventQuery{
  6676  		StartTime:    now.Add(-time.Hour * 24),
  6677  		EndTime:      now,
  6678  		NumMaxEvents: 1000,
  6679  	}
  6680  	dayFees, err := computeFeeSum(dayQuery)
  6681  	if err != nil {
  6682  		return nil, fmt.Errorf("unable to retrieve day fees: %v", err)
  6683  	}
  6684  
  6685  	weekQuery := channeldb.ForwardingEventQuery{
  6686  		StartTime:    now.Add(-time.Hour * 24 * 7),
  6687  		EndTime:      now,
  6688  		NumMaxEvents: 1000,
  6689  	}
  6690  	weekFees, err := computeFeeSum(weekQuery)
  6691  	if err != nil {
  6692  		return nil, fmt.Errorf("unable to retrieve week fees: %v", err)
  6693  	}
  6694  
  6695  	monthQuery := channeldb.ForwardingEventQuery{
  6696  		StartTime:    now.Add(-time.Hour * 24 * 30),
  6697  		EndTime:      now,
  6698  		NumMaxEvents: 1000,
  6699  	}
  6700  	monthFees, err := computeFeeSum(monthQuery)
  6701  	if err != nil {
  6702  		return nil, fmt.Errorf("unable to retrieve month fees: %v", err)
  6703  	}
  6704  
  6705  	return &lnrpc.FeeReportResponse{
  6706  		ChannelFees: feeReports,
  6707  		DayFeeSum:   uint64(dayFees.ToAtoms()),
  6708  		WeekFeeSum:  uint64(weekFees.ToAtoms()),
  6709  		MonthFeeSum: uint64(monthFees.ToAtoms()),
  6710  	}, nil
  6711  }
  6712  
  6713  // minFeeRate is the smallest permitted fee rate within the network. This is
  6714  // derived from the fact that fee rates are computed using a fixed point of
  6715  // 1,000,000. As a result, the smallest representable fee rate is 1e-6, or
  6716  // 0.000001, or 0.0001%.
  6717  const minFeeRate = 1e-6
  6718  
  6719  // UpdateChannelPolicy allows the caller to update the channel forwarding policy
  6720  // for all channels globally, or a particular channel.
  6721  func (r *rpcServer) UpdateChannelPolicy(ctx context.Context,
  6722  	req *lnrpc.PolicyUpdateRequest) (*lnrpc.PolicyUpdateResponse, error) {
  6723  
  6724  	var targetChans []wire.OutPoint
  6725  	switch scope := req.Scope.(type) {
  6726  	// If the request is targeting all active channels, then we don't need
  6727  	// to target any channels by their channel point.
  6728  	case *lnrpc.PolicyUpdateRequest_Global:
  6729  
  6730  	// Otherwise, we're targeting an individual channel by its channel
  6731  	// point.
  6732  	case *lnrpc.PolicyUpdateRequest_ChanPoint:
  6733  		txid, err := lnrpc.GetChanPointFundingTxid(scope.ChanPoint)
  6734  		if err != nil {
  6735  			return nil, err
  6736  		}
  6737  		targetChans = append(targetChans, wire.OutPoint{
  6738  			Hash:  *txid,
  6739  			Index: scope.ChanPoint.OutputIndex,
  6740  		})
  6741  	default:
  6742  		return nil, fmt.Errorf("unknown scope: %v", scope)
  6743  	}
  6744  
  6745  	var feeRateFixed uint32
  6746  
  6747  	switch {
  6748  	// The request should use either the fee rate in percent, or the new
  6749  	// ppm rate, but not both.
  6750  	case req.FeeRate != 0 && req.FeeRatePpm != 0:
  6751  		errMsg := "cannot set both FeeRate and FeeRatePpm at the " +
  6752  			"same time"
  6753  
  6754  		return nil, status.Errorf(codes.InvalidArgument, errMsg)
  6755  
  6756  	// If the request is using fee_rate.
  6757  	case req.FeeRate != 0:
  6758  		// As a sanity check, if the fee isn't zero, we'll ensure that
  6759  		// the passed fee rate is not below 1e-6, the lowest allowed
  6760  		// non-zero fee rate expressible within the protocol.
  6761  		if req.FeeRate != 0 && req.FeeRate < minFeeRate {
  6762  			return nil, fmt.Errorf("fee rate of %v is too "+
  6763  				"small, min fee rate is %v", req.FeeRate,
  6764  				minFeeRate)
  6765  		}
  6766  
  6767  		// We'll also need to convert the floating point fee rate we
  6768  		// accept over RPC to the fixed point rate that we use within
  6769  		// the protocol. We do this by multiplying the passed fee rate
  6770  		// by the fee base. This gives us the fixed point, scaled by 1
  6771  		// million that's used within the protocol.
  6772  		//
  6773  		// Because of the limited precision of the IEEE 754
  6774  		// standard, we need to round the product of feerate and
  6775  		// feebase.
  6776  		feeRateFixed = uint32(math.Round(req.FeeRate * feeBase))
  6777  
  6778  	// Otherwise, we use the fee_rate_ppm parameter.
  6779  	case req.FeeRatePpm != 0:
  6780  		feeRateFixed = req.FeeRatePpm
  6781  	}
  6782  
  6783  	// We'll also ensure that the user isn't setting a CLTV delta that
  6784  	// won't give outgoing HTLCs enough time to fully resolve if needed.
  6785  	if req.TimeLockDelta < minTimeLockDelta {
  6786  		return nil, fmt.Errorf("time lock delta of %v is too small, "+
  6787  			"minimum supported is %v", req.TimeLockDelta,
  6788  			minTimeLockDelta)
  6789  	}
  6790  
  6791  	baseFeeMAtoms := lnwire.MilliAtom(req.BaseFeeMAtoms)
  6792  	feeSchema := routing.FeeSchema{
  6793  		BaseFee: baseFeeMAtoms,
  6794  		FeeRate: feeRateFixed,
  6795  	}
  6796  
  6797  	maxHtlc := lnwire.MilliAtom(req.MaxHtlcMAtoms)
  6798  	var minHtlc *lnwire.MilliAtom
  6799  	if req.MinHtlcMAtomsSpecified {
  6800  		min := lnwire.MilliAtom(req.MinHtlcMAtoms)
  6801  		minHtlc = &min
  6802  	}
  6803  
  6804  	chanPolicy := routing.ChannelPolicy{
  6805  		FeeSchema:     feeSchema,
  6806  		TimeLockDelta: req.TimeLockDelta,
  6807  		MaxHTLC:       maxHtlc,
  6808  		MinHTLC:       minHtlc,
  6809  	}
  6810  
  6811  	rpcsLog.Debugf("[updatechanpolicy] updating channel policy base_fee=%v, "+
  6812  		"rate_fixed=%v, time_lock_delta: %v, "+
  6813  		"min_htlc=%v, max_htlc=%v, targets=%v",
  6814  		req.BaseFeeMAtoms, feeRateFixed, req.TimeLockDelta,
  6815  		minHtlc, maxHtlc,
  6816  		spew.Sdump(targetChans))
  6817  
  6818  	// With the scope resolved, we'll now send this to the local channel
  6819  	// manager so it can propagate the new policy for our target channel(s).
  6820  	failedUpdates, err := r.server.localChanMgr.UpdatePolicy(chanPolicy,
  6821  		targetChans...)
  6822  	if err != nil {
  6823  		return nil, err
  6824  	}
  6825  
  6826  	return &lnrpc.PolicyUpdateResponse{
  6827  		FailedUpdates: failedUpdates,
  6828  	}, nil
  6829  }
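
        // Example (illustrative sketch): two equivalent ways of expressing the
        // same proportional fee in a policy update. The values are
        // hypothetical:
        //
        //	// 25 parts per million, i.e. 0.0025% of the forwarded amount.
        //	reqA := &lnrpc.PolicyUpdateRequest{
        //		Scope:         &lnrpc.PolicyUpdateRequest_Global{Global: true},
        //		BaseFeeMAtoms: 1000,
        //		FeeRate:       0.000025,
        //		TimeLockDelta: 80,
        //	}
        //	// The same policy using the fixed point parameter directly.
        //	reqB := &lnrpc.PolicyUpdateRequest{
        //		Scope:         &lnrpc.PolicyUpdateRequest_Global{Global: true},
        //		BaseFeeMAtoms: 1000,
        //		FeeRatePpm:    25,
        //		TimeLockDelta: 80,
        //	}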
  6830  
  6831  // ForwardingHistory allows the caller to query the htlcswitch for a record of
  6832  // all HTLCs forwarded within the target time range, and an integer offset
  6833  // within that time range. If no time-range is specified, then the first chunk
  6834  // of the past 24 hrs of forwarding history is returned.
  6835  //
  6836  // A list of forwarding events is returned. The size of each forwarding event
  6837  // is 40 bytes, and the max message size able to be returned in gRPC is 4 MiB.
  6838  // In order to safely stay under this max limit, we'll return 50k events per
  6839  // response.  Each response has the index offset of the last entry. The index
  6840  // offset can be provided to the request to allow the caller to skip a series
  6841  // of records.
  6842  func (r *rpcServer) ForwardingHistory(ctx context.Context,
  6843  	req *lnrpc.ForwardingHistoryRequest) (*lnrpc.ForwardingHistoryResponse, error) {
  6844  
  6845  	rpcsLog.Debugf("[forwardinghistory]")
  6846  
  6847  	// Before we perform the queries below, we'll instruct the switch to
  6848  	// flush any pending events to disk. This ensures we get a complete
  6849  	// snapshot at this particular time.
  6850  	if err := r.server.htlcSwitch.FlushForwardingEvents(); err != nil {
  6851  		return nil, fmt.Errorf("unable to flush forwarding "+
  6852  			"events: %v", err)
  6853  	}
  6854  
  6855  	var (
  6856  		startTime, endTime time.Time
  6857  
  6858  		numEvents uint32
  6859  	)
  6860  
  6861  	// startTime defaults to the Unix epoch (0 unixtime, or midnight 01-01-1970).
  6862  	startTime = time.Unix(int64(req.StartTime), 0)
  6863  
  6864  	// If the end time wasn't specified, assume a default end time of now.
  6865  	if req.EndTime == 0 {
  6866  		now := time.Now()
  6867  		endTime = now
  6868  	} else {
  6869  		endTime = time.Unix(int64(req.EndTime), 0)
  6870  	}
  6871  
  6872  	// If the number of events wasn't specified, then we'll default to
  6873  	// returning the last 100 events.
  6874  	numEvents = req.NumMaxEvents
  6875  	if numEvents == 0 {
  6876  		numEvents = 100
  6877  	}
  6878  
  6879  	// Next, we'll map the proto request into a format that is understood by
  6880  	// the forwarding log.
  6881  	eventQuery := channeldb.ForwardingEventQuery{
  6882  		StartTime:    startTime,
  6883  		EndTime:      endTime,
  6884  		IndexOffset:  req.IndexOffset,
  6885  		NumMaxEvents: numEvents,
  6886  	}
  6887  	timeSlice, err := r.server.miscDB.ForwardingLog().Query(eventQuery)
  6888  	if err != nil {
  6889  		return nil, fmt.Errorf("unable to query forwarding log: %v", err)
  6890  	}
  6891  
  6892  	// TODO(roasbeef): add settlement latency?
  6893  	//  * use FPE on all records?
  6894  
  6895  	// With the events retrieved, we'll now map them into the proper proto
  6896  	// response.
  6897  	//
  6898  	// TODO(roasbeef): show in ns for the outside?
  6899  	resp := &lnrpc.ForwardingHistoryResponse{
  6900  		ForwardingEvents: make([]*lnrpc.ForwardingEvent, len(timeSlice.ForwardingEvents)),
  6901  		LastOffsetIndex:  timeSlice.LastIndexOffset,
  6902  	}
  6903  	for i, event := range timeSlice.ForwardingEvents {
  6904  		amtInMAtoms := event.AmtIn
  6905  		amtOutMAtoms := event.AmtOut
  6906  		feeMAtoms := event.AmtIn - event.AmtOut
  6907  
  6908  		resp.ForwardingEvents[i] = &lnrpc.ForwardingEvent{
  6909  			Timestamp:    uint64(event.Timestamp.Unix()),
  6910  			TimestampNs:  uint64(event.Timestamp.UnixNano()),
  6911  			ChanIdIn:     event.IncomingChanID.ToUint64(),
  6912  			ChanIdOut:    event.OutgoingChanID.ToUint64(),
  6913  			AmtIn:        uint64(amtInMAtoms.ToAtoms()),
  6914  			AmtOut:       uint64(amtOutMAtoms.ToAtoms()),
  6915  			Fee:          uint64(feeMAtoms.ToAtoms()),
  6916  			FeeMAtoms:    uint64(feeMAtoms),
  6917  			AmtInMAtoms:  uint64(amtInMAtoms),
  6918  			AmtOutMAtoms: uint64(amtOutMAtoms),
  6919  		}
  6920  	}
  6921  
  6922  	return resp, nil
  6923  }
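
        // Example (illustrative sketch, client side): paging through the
        // forwarding log by feeding LastOffsetIndex back into the next
        // request. The ctx and client values are hypothetical:
        //
        //	req := &lnrpc.ForwardingHistoryRequest{NumMaxEvents: 1000}
        //	for {
        //		resp, err := client.ForwardingHistory(ctx, req)
        //		if err != nil { /* handle error */ }
        //		if len(resp.ForwardingEvents) == 0 {
        //			break
        //		}
        //		req.IndexOffset = resp.LastOffsetIndex
        //	}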
  6924  
  6925  // ExportChannelBackup attempts to return an encrypted static channel backup
  6926  // for the target channel identified by its channel point. The backup is
  6927  // encrypted with a key generated from the aezeed seed of the user. The
  6928  // returned backup can either be restored using the RestoreChannelBackup method
  6929  // once lnd is running, or via the InitWallet and UnlockWallet methods from the
  6930  // WalletUnlocker service.
  6931  func (r *rpcServer) ExportChannelBackup(ctx context.Context,
  6932  	in *lnrpc.ExportChannelBackupRequest) (*lnrpc.ChannelBackup, error) {
  6933  
  6934  	// First, we'll convert the lnrpc channel point into a wire.OutPoint
  6935  	// that we can manipulate.
  6936  	txid, err := lnrpc.GetChanPointFundingTxid(in.ChanPoint)
  6937  	if err != nil {
  6938  		return nil, err
  6939  	}
  6940  	chanPoint := wire.OutPoint{
  6941  		Hash:  *txid,
  6942  		Index: in.ChanPoint.OutputIndex,
  6943  	}
  6944  
  6945  	// Next, we'll attempt to fetch a channel backup for this channel from
  6946  	// the database. If this channel has been closed, or the outpoint is
  6947  	// unknown, then we'll return an error.
  6948  	unpackedBackup, err := chanbackup.FetchBackupForChan(
  6949  		chanPoint, r.server.chanStateDB, r.server.addrSource,
  6950  	)
  6951  	if err != nil {
  6952  		return nil, err
  6953  	}
  6954  
  6955  	// At this point, we have an unpacked backup (plaintext) so we'll now
  6956  	// attempt to serialize and encrypt it in order to create a packed
  6957  	// backup.
  6958  	packedBackups, err := chanbackup.PackStaticChanBackups(
  6959  		[]chanbackup.Single{*unpackedBackup},
  6960  		r.server.cc.KeyRing,
  6961  	)
  6962  	if err != nil {
  6963  		return nil, fmt.Errorf("packing of backups failed: %v", err)
  6964  	}
  6965  
  6966  	// Before we proceed, we'll ensure that we received a backup for this
  6967  	// channel, otherwise, we'll bail out.
  6968  	packedBackup, ok := packedBackups[chanPoint]
  6969  	if !ok {
  6970  		return nil, fmt.Errorf("expected single backup for "+
  6971  			"ChannelPoint(%v), got %v", chanPoint,
  6972  			len(packedBackups))
  6973  	}
  6974  
  6975  	return &lnrpc.ChannelBackup{
  6976  		ChanPoint:  in.ChanPoint,
  6977  		ChanBackup: packedBackup,
  6978  	}, nil
  6979  }
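
        // Example (illustrative sketch, client side): exporting a single
        // channel backup and persisting the encrypted blob. The ctx, client,
        // chanPoint and path values are hypothetical:
        //
        //	backup, err := client.ExportChannelBackup(ctx,
        //		&lnrpc.ExportChannelBackupRequest{ChanPoint: chanPoint},
        //	)
        //	if err != nil { /* handle error */ }
        //	err = os.WriteFile(path, backup.ChanBackup, 0600)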
  6980  
  6981  // VerifyChanBackup allows a caller to verify the integrity of a channel backup
  6982  // snapshot. This method will accept either a packed Single or a packed
  6983  // Multi. Specifying both will result in an error.
  6984  func (r *rpcServer) VerifyChanBackup(ctx context.Context,
  6985  	in *lnrpc.ChanBackupSnapshot) (*lnrpc.VerifyChanBackupResponse, error) {
  6986  
  6987  	switch {
  6988  	// If neither a Single nor a Multi has been specified, then we have nothing
  6989  	// to verify.
  6990  	case in.GetSingleChanBackups() == nil && in.GetMultiChanBackup() == nil:
  6991  		return nil, errors.New("either a Single or Multi channel " +
  6992  			"backup must be specified")
  6993  
  6994  	// Either a Single or a Multi must be specified, but not both.
  6995  	case in.GetSingleChanBackups() != nil && in.GetMultiChanBackup() != nil:
  6996  		return nil, errors.New("either a Single or Multi channel " +
  6997  			"backup must be specified, but not both")
  6998  
  6999  	// If a Single is specified then we'll only accept one of them to allow
  7000  	// the caller to map the valid/invalid state for each individual Single.
  7001  	case in.GetSingleChanBackups() != nil:
  7002  		chanBackupsProtos := in.GetSingleChanBackups().ChanBackups
  7003  		if len(chanBackupsProtos) != 1 {
  7004  			return nil, errors.New("only one Single is accepted " +
  7005  				"at a time")
  7006  		}
  7007  
  7008  		// First, we'll convert the raw byte slice into a type we can
  7009  		// work with a bit better.
  7010  		chanBackup := chanbackup.PackedSingles(
  7011  			[][]byte{chanBackupsProtos[0].ChanBackup},
  7012  		)
  7013  
  7014  		// With our PackedSingles created, we'll attempt to unpack the
  7015  		// backup. If this fails, then we know the backup is invalid for
  7016  		// some reason.
  7017  		_, err := chanBackup.Unpack(r.server.cc.KeyRing)
  7018  		if err != nil {
  7019  			return nil, fmt.Errorf("invalid single channel "+
  7020  				"backup: %v", err)
  7021  		}
  7022  
  7023  	case in.GetMultiChanBackup() != nil:
  7024  		// We'll convert the raw byte slice into a PackedMulti that we
  7025  		// can easily work with.
  7026  		packedMultiBackup := in.GetMultiChanBackup().MultiChanBackup
  7027  		packedMulti := chanbackup.PackedMulti(packedMultiBackup)
  7028  
  7029  		// We'll now attempt to unpack the Multi. If this fails, then we
  7030  		// know it's invalid.
  7031  		_, err := packedMulti.Unpack(r.server.cc.KeyRing)
  7032  		if err != nil {
  7033  			return nil, fmt.Errorf("invalid multi channel backup: "+
  7034  				"%v", err)
  7035  		}
  7036  	}
  7037  
  7038  	return &lnrpc.VerifyChanBackupResponse{}, nil
  7039  }
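
        // Example (illustrative sketch, client side): verifying a previously
        // exported multi-channel backup. The ctx, client and packed values are
        // hypothetical, with packed holding the raw encrypted backup bytes:
        //
        //	_, err := client.VerifyChanBackup(ctx, &lnrpc.ChanBackupSnapshot{
        //		MultiChanBackup: &lnrpc.MultiChanBackup{
        //			MultiChanBackup: packed,
        //		},
        //	})
        //	// A nil error means the backup decrypted and unpacked cleanly.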
  7040  
  7041  // createBackupSnapshot converts the passed Single backups into a snapshot which
  7042  // contains individual packed single backups, as well as a single packed multi
  7043  // backup.
  7044  func (r *rpcServer) createBackupSnapshot(backups []chanbackup.Single) (
  7045  	*lnrpc.ChanBackupSnapshot, error) {
  7046  
  7047  	// Once we have the set of back ups, we'll attempt to pack them all
  7048  	// into a series of single channel backups.
  7049  	singleChanPackedBackups, err := chanbackup.PackStaticChanBackups(
  7050  		backups, r.server.cc.KeyRing,
  7051  	)
  7052  	if err != nil {
  7053  		return nil, fmt.Errorf("unable to pack set of chan "+
  7054  			"backups: %v", err)
  7055  	}
  7056  
  7057  	// Now that we have our set of single packed backups, we'll morph that
  7058  	// into a form that the proto response requires.
  7059  	numBackups := len(singleChanPackedBackups)
  7060  	singleBackupResp := &lnrpc.ChannelBackups{
  7061  		ChanBackups: make([]*lnrpc.ChannelBackup, 0, numBackups),
  7062  	}
  7063  	for chanPoint, singlePackedBackup := range singleChanPackedBackups {
  7064  		txid := chanPoint.Hash
  7065  		rpcChanPoint := &lnrpc.ChannelPoint{
  7066  			FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
  7067  				FundingTxidBytes: txid[:],
  7068  			},
  7069  			OutputIndex: chanPoint.Index,
  7070  		}
  7071  
  7072  		singleBackupResp.ChanBackups = append(
  7073  			singleBackupResp.ChanBackups,
  7074  			&lnrpc.ChannelBackup{
  7075  				ChanPoint:  rpcChanPoint,
  7076  				ChanBackup: singlePackedBackup,
  7077  			},
  7078  		)
  7079  	}
  7080  
  7081  	// In addition to the set of single chan backups, we'll also create a
  7082  	// single multi-channel backup which can be serialized into a single
  7083  	// file for safe storage.
  7084  	var b bytes.Buffer
  7085  	unpackedMultiBackup := chanbackup.Multi{
  7086  		StaticBackups: backups,
  7087  	}
  7088  	err = unpackedMultiBackup.PackToWriter(&b, r.server.cc.KeyRing)
  7089  	if err != nil {
  7090  		return nil, fmt.Errorf("unable to multi-pack backups: %v", err)
  7091  	}
  7092  
  7093  	multiBackupResp := &lnrpc.MultiChanBackup{
  7094  		MultiChanBackup: b.Bytes(),
  7095  	}
  7096  	for _, singleBackup := range singleBackupResp.ChanBackups {
  7097  		multiBackupResp.ChanPoints = append(
  7098  			multiBackupResp.ChanPoints, singleBackup.ChanPoint,
  7099  		)
  7100  	}
  7101  
  7102  	return &lnrpc.ChanBackupSnapshot{
  7103  		SingleChanBackups: singleBackupResp,
  7104  		MultiChanBackup:   multiBackupResp,
  7105  	}, nil
  7106  }
  7107  
  7108  // ExportAllChannelBackups returns static channel backups for all existing
  7109  // channels known to lnd. A set of regular singular static channel backups for
  7110  // each channel are returned. Additionally, a multi-channel backup is returned
  7111  // as well, which contains a single encrypted blob containing the backups of
  7112  // each channel.
  7113  func (r *rpcServer) ExportAllChannelBackups(ctx context.Context,
  7114  	in *lnrpc.ChanBackupExportRequest) (*lnrpc.ChanBackupSnapshot, error) {
  7115  
  7116  	// First, we'll attempt to read back ups for ALL currently opened
  7117  	// channels from disk.
  7118  	allUnpackedBackups, err := chanbackup.FetchStaticChanBackups(
  7119  		r.server.chanStateDB, r.server.addrSource,
  7120  	)
  7121  	if err != nil {
  7122  		return nil, fmt.Errorf("unable to fetch all static chan "+
  7123  			"backups: %v", err)
  7124  	}
  7125  
  7126  	// With the backups assembled, we'll create a full snapshot.
  7127  	return r.createBackupSnapshot(allUnpackedBackups)
  7128  }
  7129  
  7130  // RestoreChannelBackups accepts a set of singular channel backups, or a single
  7131  // encrypted multi-chan backup and attempts to recover any funds remaining
  7132  // within the channel. If we're able to unpack the backup, then the new channel
  7133  // will be shown under listchannels, as well as pending channels.
  7134  func (r *rpcServer) RestoreChannelBackups(ctx context.Context,
  7135  	in *lnrpc.RestoreChanBackupRequest) (*lnrpc.RestoreBackupResponse, error) {
  7136  
  7137  	// If the server hasn't yet started, it won't be able to service any of
  7138  	// our requests, so we'll bail early here.
  7139  	if !r.server.Started() {
  7140  		return nil, ErrServerNotActive
  7141  	}
  7142  
  7143  	// First, we'll make our implementation of the
  7144  	// chanbackup.ChannelRestorer interface which we'll use to properly
  7145  	// restore either a set of chanbackup.Single or chanbackup.Multi
  7146  	// backups.
  7147  	chanRestorer := &chanDBRestorer{
  7148  		db:         r.server.chanStateDB,
  7149  		secretKeys: r.server.cc.KeyRing,
  7150  		chainArb:   r.server.chainArb,
  7151  	}
  7152  
  7153  	// We'll accept either a list of Single backups, or a single Multi
  7154  	// backup which contains several single backups.
  7155  	switch {
  7156  	case in.GetChanBackups() != nil:
  7157  		chanBackupsProtos := in.GetChanBackups()
  7158  
  7159  		// Now that we know what type of backup we're working with,
  7160  		// we'll parse them all out into a more suitable format.
  7161  		packedBackups := make([][]byte, 0, len(chanBackupsProtos.ChanBackups))
  7162  		for _, chanBackup := range chanBackupsProtos.ChanBackups {
  7163  			packedBackups = append(
  7164  				packedBackups, chanBackup.ChanBackup,
  7165  			)
  7166  		}
  7167  
  7168  		// With our backups obtained, we'll now restore them which will
  7169  		// write the new backups to disk, and then attempt to connect
  7170  		// out to any peers that we know of which were our prior
  7171  		// channel peers.
  7172  		err := chanbackup.UnpackAndRecoverSingles(
  7173  			chanbackup.PackedSingles(packedBackups),
  7174  			r.server.cc.KeyRing, chanRestorer, r.server,
  7175  		)
  7176  		if err != nil {
  7177  			return nil, fmt.Errorf("unable to unpack single "+
  7178  				"backups: %v", err)
  7179  		}
  7180  
  7181  	case in.GetMultiChanBackup() != nil:
  7182  		packedMultiBackup := in.GetMultiChanBackup()
  7183  
  7184  		// With our backups obtained, we'll now restore them which will
  7185  		// write the new backups to disk, and then attempt to connect
  7186  		// out to any peers that we know of which were our prior
  7187  		// channel peers.
  7188  		packedMulti := chanbackup.PackedMulti(packedMultiBackup)
  7189  		err := chanbackup.UnpackAndRecoverMulti(
  7190  			packedMulti, r.server.cc.KeyRing, chanRestorer,
  7191  			r.server,
  7192  		)
  7193  		if err != nil {
  7194  			return nil, fmt.Errorf("unable to unpack chan "+
  7195  				"backup: %v", err)
  7196  		}
  7197  	}
  7198  
  7199  	return &lnrpc.RestoreBackupResponse{}, nil
  7200  }
  7201  
  7202  // SubscribeChannelBackups allows a client to subscribe to the most up to
  7203  // date information concerning the state of all channel back ups. Each time a
  7204  // new channel is added, we return the new set of channels, along with a
  7205  // multi-chan backup containing the backup info for all channels. Each time a
  7206  // channel is closed, we send a new update, which contains no new chan back
  7207  // ups, but the updated set of encrypted multi-chan backups with the closed
  7208  // channel(s) removed.
  7209  func (r *rpcServer) SubscribeChannelBackups(req *lnrpc.ChannelBackupSubscription,
  7210  	updateStream lnrpc.Lightning_SubscribeChannelBackupsServer) error {
  7211  
  7212  	// First, we'll subscribe to the primary channel notifier so we can
  7213  	// obtain events for new pending/opened/closed channels.
  7214  	chanSubscription, err := r.server.channelNotifier.SubscribeChannelEvents()
  7215  	if err != nil {
  7216  		return err
  7217  	}
  7218  
  7219  	defer chanSubscription.Cancel()
  7220  	for {
  7221  		select {
  7222  		// A new event has been sent by the channel notifier, we'll
  7223  		// assemble, then sling out a new event to the client.
  7224  		case e := <-chanSubscription.Updates():
  7225  			// TODO(roasbeef): batch dispatch ntnfs
  7226  
  7227  			switch e.(type) {
  7228  
  7229  			// We only care about new/closed channels, so we'll
  7230  			// skip any events for active/inactive channels.
  7231  			// To make the subscription behave the same way as the
  7232  			// synchronous call and the file based backup, we also
  7233  			// include pending channels in the update.
  7234  			case channelnotifier.ActiveChannelEvent:
  7235  				continue
  7236  			case channelnotifier.InactiveChannelEvent:
  7237  				continue
  7238  			case channelnotifier.ActiveLinkEvent:
  7239  				continue
  7240  			}
  7241  
  7242  			// Now that we know the channel state has changed,
  7243  			// we'll obtain the current set of single channel
  7244  			// backups from disk.
  7245  			chanBackups, err := chanbackup.FetchStaticChanBackups(
  7246  				r.server.chanStateDB, r.server.addrSource,
  7247  			)
  7248  			if err != nil {
  7249  				return fmt.Errorf("unable to fetch all "+
  7250  					"static chan backups: %v", err)
  7251  			}
  7252  
  7253  			// With our backups obtained, we'll pack them into a
  7254  			// snapshot and send them back to the client.
  7255  			backupSnapshot, err := r.createBackupSnapshot(
  7256  				chanBackups,
  7257  			)
  7258  			if err != nil {
  7259  				return err
  7260  			}
  7261  			err = updateStream.Send(backupSnapshot)
  7262  			if err != nil {
  7263  				return err
  7264  			}
  7265  
  7266  		// The response stream's context for whatever reason has been
  7267  		// closed. If context is closed by an exceeded deadline we will
  7268  		// return an error.
  7269  		case <-updateStream.Context().Done():
  7270  			if errors.Is(updateStream.Context().Err(), context.Canceled) {
  7271  				return nil
  7272  			}
  7273  			return updateStream.Context().Err()
  7274  
  7275  		case <-r.quit:
  7276  			return nil
  7277  		}
  7278  	}
  7279  }
  7280  
  7281  // ChannelAcceptor dispatches a bi-directional streaming RPC in which
  7282  // OpenChannel requests are sent to the client and the client responds with
  7283  // a boolean that tells LND whether or not to accept the channel. This allows
  7284  // node operators to specify their own criteria for accepting inbound channels
  7285  // through a single persistent connection.
  7286  func (r *rpcServer) ChannelAcceptor(stream lnrpc.Lightning_ChannelAcceptorServer) error {
  7287  	chainedAcceptor := r.chanPredicate
  7288  
  7289  	// Create a new RPCAcceptor which will send requests into the
  7290  	// newRequests channel when it receives them.
  7291  	rpcAcceptor := chanacceptor.NewRPCAcceptor(
  7292  		stream.Recv, stream.Send, r.cfg.AcceptorTimeout,
  7293  		r.cfg.ActiveNetParams.Params, r.quit,
  7294  	)
  7295  
  7296  	// Add the RPCAcceptor to the ChainedAcceptor and defer its removal.
  7297  	id := chainedAcceptor.AddAcceptor(rpcAcceptor)
  7298  	defer chainedAcceptor.RemoveAcceptor(id)
  7299  
  7300  	// Run the rpc acceptor, which will accept requests for channel
  7301  	// acceptance decisions from our chained acceptor, forward them to the
  7302  	// client over the stream, and listen for and report the client's
  7303  	// responses. This function blocks, and will exit if the rpcserver
  7304  	// receives the instruction to shutdown, or the client cancels.
  7305  	return rpcAcceptor.Run()
  7306  }
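
// exampleChannelAcceptor is an illustrative, hypothetical client-side sketch
// (not used by the server) of the bi-directional acceptor stream above: the
// client receives one ChannelAcceptRequest per inbound channel and answers
// with a ChannelAcceptResponse that echoes the pending channel ID so the node
// can match the decision to the request. The decide callback is an assumption
// standing in for the operator's own policy.
func exampleChannelAcceptor(ctx context.Context, client lnrpc.LightningClient,
	decide func(*lnrpc.ChannelAcceptRequest) bool) error {

	stream, err := client.ChannelAcceptor(ctx)
	if err != nil {
		return err
	}

	for {
		req, err := stream.Recv()
		if err != nil {
			return err
		}

		// Reply with the operator's decision for this channel.
		err = stream.Send(&lnrpc.ChannelAcceptResponse{
			Accept:        decide(req),
			PendingChanId: req.PendingChanId,
		})
		if err != nil {
			return err
		}
	}
}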
  7307  
  7308  // BakeMacaroon allows the creation of a new macaroon with custom read and write
  7309  // permissions. No first-party caveats are added since this can be done offline.
  7310  // If the --allow-external-permissions flag is set, the RPC will allow
  7311  // external permissions that LND is not aware of.
  7312  func (r *rpcServer) BakeMacaroon(ctx context.Context,
  7313  	req *lnrpc.BakeMacaroonRequest) (*lnrpc.BakeMacaroonResponse, error) {
  7314  
  7315  	rpcsLog.Debugf("[bakemacaroon]")
  7316  
  7317  	// If the --no-macaroons flag is used to start lnd, the macaroon service
  7318  	// is not initialized. Therefore we can't bake new macaroons.
  7319  	if r.macService == nil {
  7320  		return nil, errMacaroonDisabled
  7321  	}
  7322  
  7323  	helpMsg := fmt.Sprintf("supported actions are %v, supported entities "+
  7324  		"are %v", validActions, validEntities)
  7325  
  7326  	// Don't allow empty permission list as it doesn't make sense to have
  7327  	// a macaroon that is not allowed to access any RPC.
  7328  	if len(req.Permissions) == 0 {
  7329  		return nil, fmt.Errorf("permission list cannot be empty. "+
  7330  			"specify at least one action/entity pair. %s", helpMsg)
  7331  	}
  7332  
  7333  	// Validate and map permission struct used by gRPC to the one used by
  7334  	// the bakery. If the --allow-external-permissions flag is set, we
  7335  	// will not validate, but map.
  7336  	requestedPermissions := make([]bakery.Op, len(req.Permissions))
  7337  	for idx, op := range req.Permissions {
  7338  		if req.AllowExternalPermissions {
  7339  			requestedPermissions[idx] = bakery.Op{
  7340  				Entity: op.Entity,
  7341  				Action: op.Action,
  7342  			}
  7343  			continue
  7344  		}
  7345  
  7346  		if !stringInSlice(op.Entity, validEntities) {
  7347  			return nil, fmt.Errorf("invalid permission entity. %s",
  7348  				helpMsg)
  7349  		}
  7350  
  7351  		// Either we have the special entity "uri" which specifies a
  7352  		// full gRPC URI or we have one of the pre-defined actions.
  7353  		if op.Entity == macaroons.PermissionEntityCustomURI {
  7354  			allPermissions := r.interceptorChain.Permissions()
  7355  			_, ok := allPermissions[op.Action]
  7356  			if !ok {
  7357  				return nil, fmt.Errorf("invalid permission " +
  7358  					"action, must be an existing URI in " +
  7359  					"the format /package.Service/" +
  7360  					"MethodName")
  7361  			}
  7362  		} else if !stringInSlice(op.Action, validActions) {
  7363  			return nil, fmt.Errorf("invalid permission action. %s",
  7364  				helpMsg)
  7365  
  7366  		}
  7367  
  7368  		requestedPermissions[idx] = bakery.Op{
  7369  			Entity: op.Entity,
  7370  			Action: op.Action,
  7371  		}
  7372  	}
  7373  
  7374  	// Convert the root key ID from uint64 to bytes. Because the
  7375  	// DefaultRootKeyID is the digit 0 encoded as the byte slice of the
  7376  	// string "0", we keep all IDs in the same format: each ID is the byte
  7377  	// slice of its decimal string representation, e.g. uint64(123)
  7378  	// becomes []byte("123").
  7379  	rootKeyID := []byte(strconv.FormatUint(req.RootKeyId, 10))
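	// For example, a request with RootKeyId = 123 yields the root key ID
	// []byte("123"), the same encoding used by ListMacaroonIDs and
	// DeleteMacaroonID below.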
  7380  
  7381  	// Bake new macaroon with the given permissions and send it binary
  7382  	// serialized and hex encoded to the client.
  7383  	newMac, err := r.macService.NewMacaroon(
  7384  		ctx, rootKeyID, requestedPermissions...,
  7385  	)
  7386  	if err != nil {
  7387  		return nil, err
  7388  	}
  7389  	newMacBytes, err := newMac.M().MarshalBinary()
  7390  	if err != nil {
  7391  		return nil, err
  7392  	}
  7393  	resp := &lnrpc.BakeMacaroonResponse{}
  7394  	resp.Macaroon = hex.EncodeToString(newMacBytes)
  7395  
  7396  	return resp, nil
  7397  }
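
// exampleBakeMacaroon is an illustrative, hypothetical client-side sketch (not
// used by the server) of a BakeMacaroon call: it requests a macaroon that may
// only read invoices and call one specific gRPC URI, tied to root key ID 1.
// The entity/action pairs follow the validEntities/validActions sets checked
// above; the concrete URI is just an example.
func exampleBakeMacaroon(ctx context.Context,
	client lnrpc.LightningClient) (string, error) {

	resp, err := client.BakeMacaroon(ctx, &lnrpc.BakeMacaroonRequest{
		RootKeyId: 1,
		Permissions: []*lnrpc.MacaroonPermission{
			{Entity: "invoices", Action: "read"},
			{Entity: "uri", Action: "/lnrpc.Lightning/GetInfo"},
		},
	})
	if err != nil {
		return "", err
	}

	// The macaroon is returned hex encoded, ready to be stored on disk or
	// attached to gRPC request metadata.
	return resp.Macaroon, nil
}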
  7398  
  7399  // ListMacaroonIDs returns a list of macaroon root key IDs in use.
  7400  func (r *rpcServer) ListMacaroonIDs(ctx context.Context,
  7401  	req *lnrpc.ListMacaroonIDsRequest) (
  7402  	*lnrpc.ListMacaroonIDsResponse, error) {
  7403  
  7404  	rpcsLog.Debugf("[listmacaroonids]")
  7405  
  7406  	// If the --no-macaroons flag is used to start lnd, the macaroon service
  7407  	// is not initialized. Therefore we can't show any IDs.
  7408  	if r.macService == nil {
  7409  		return nil, errMacaroonDisabled
  7410  	}
  7411  
  7412  	rootKeyIDByteSlice, err := r.macService.ListMacaroonIDs(ctx)
  7413  	if err != nil {
  7414  		return nil, err
  7415  	}
  7416  
  7417  	var rootKeyIDs []uint64
  7418  	for _, value := range rootKeyIDByteSlice {
  7419  		// Convert bytes into uint64.
  7420  		id, err := strconv.ParseUint(string(value), 10, 64)
  7421  		if err != nil {
  7422  			return nil, err
  7423  		}
  7424  
  7425  		rootKeyIDs = append(rootKeyIDs, id)
  7426  	}
  7427  
  7428  	return &lnrpc.ListMacaroonIDsResponse{RootKeyIds: rootKeyIDs}, nil
  7429  }
  7430  
  7431  // DeleteMacaroonID removes a specific macaroon ID.
  7432  func (r *rpcServer) DeleteMacaroonID(ctx context.Context,
  7433  	req *lnrpc.DeleteMacaroonIDRequest) (
  7434  	*lnrpc.DeleteMacaroonIDResponse, error) {
  7435  
  7436  	rpcsLog.Debugf("[deletemacaroonid]")
  7437  
  7438  	// If the --no-macaroons flag is used to start lnd, the macaroon service
  7439  	// is not initialized. Therefore we can't delete any IDs.
  7440  	if r.macService == nil {
  7441  		return nil, errMacaroonDisabled
  7442  	}
  7443  
  7444  	// Convert the root key ID from uint64 to bytes. Because the
  7445  	// DefaultRootKeyID is the digit 0 encoded as the byte slice of the
  7446  	// string "0", we keep all IDs in the same format: the byte slice of
  7447  	// the ID's decimal string representation.
  7448  	rootKeyID := []byte(strconv.FormatUint(req.RootKeyId, 10))
  7449  	deletedIDBytes, err := r.macService.DeleteMacaroonID(ctx, rootKeyID)
  7450  	if err != nil {
  7451  		return nil, err
  7452  	}
  7453  
  7454  	return &lnrpc.DeleteMacaroonIDResponse{
  7455  		// If the root key ID doesn't exist, it won't be deleted. We
  7456  		// will return a response with deleted = false, otherwise true.
  7457  		Deleted: deletedIDBytes != nil,
  7458  	}, nil
  7459  }
  7460  
  7461  // ListPermissions lists all RPC method URIs and their required macaroon
  7462  // permissions to access them.
  7463  func (r *rpcServer) ListPermissions(_ context.Context,
  7464  	_ *lnrpc.ListPermissionsRequest) (*lnrpc.ListPermissionsResponse,
  7465  	error) {
  7466  
  7467  	rpcsLog.Debugf("[listpermissions]")
  7468  
  7469  	permissionMap := make(map[string]*lnrpc.MacaroonPermissionList)
  7470  	for uri, perms := range r.interceptorChain.Permissions() {
  7471  		rpcPerms := make([]*lnrpc.MacaroonPermission, len(perms))
  7472  		for idx, perm := range perms {
  7473  			rpcPerms[idx] = &lnrpc.MacaroonPermission{
  7474  				Entity: perm.Entity,
  7475  				Action: perm.Action,
  7476  			}
  7477  		}
  7478  		permissionMap[uri] = &lnrpc.MacaroonPermissionList{
  7479  			Permissions: rpcPerms,
  7480  		}
  7481  	}
  7482  
  7483  	return &lnrpc.ListPermissionsResponse{
  7484  		MethodPermissions: permissionMap,
  7485  	}, nil
  7486  }
  7487  
  7488  // CheckMacaroonPermissions checks the caveats and permissions of a macaroon.
  7489  func (r *rpcServer) CheckMacaroonPermissions(ctx context.Context,
  7490  	req *lnrpc.CheckMacPermRequest) (*lnrpc.CheckMacPermResponse, error) {
  7491  
  7492  	// Turn grpc macaroon permission into bakery.Op for the server to
  7493  	// process.
  7494  	permissions := make([]bakery.Op, len(req.Permissions))
  7495  	for idx, perm := range req.Permissions {
  7496  		permissions[idx] = bakery.Op{
  7497  			Entity: perm.Entity,
  7498  			Action: perm.Action,
  7499  		}
  7500  	}
  7501  
  7502  	err := r.macService.CheckMacAuth(
  7503  		ctx, req.Macaroon, permissions, req.FullMethod,
  7504  	)
  7505  	if err != nil {
  7506  		return nil, status.Error(codes.InvalidArgument, err.Error())
  7507  	}
  7508  
  7509  	return &lnrpc.CheckMacPermResponse{
  7510  		Valid: true,
  7511  	}, nil
  7512  }
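
// exampleCheckMacaroon is an illustrative, hypothetical client-side sketch
// (not used by the server) of CheckMacaroonPermissions: it asks the server
// whether a serialized macaroon satisfies a single entity/action pair for a
// particular gRPC method. The entity/action/URI values are examples only.
func exampleCheckMacaroon(ctx context.Context, client lnrpc.LightningClient,
	macBytes []byte) (bool, error) {

	resp, err := client.CheckMacaroonPermissions(
		ctx, &lnrpc.CheckMacPermRequest{
			Macaroon: macBytes,
			Permissions: []*lnrpc.MacaroonPermission{
				{Entity: "info", Action: "read"},
			},
			FullMethod: "/lnrpc.Lightning/GetInfo",
		},
	)
	if err != nil {
		return false, err
	}

	return resp.Valid, nil
}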
  7513  
  7514  // FundingStateStep is an advanced funding related call that allows the caller
  7515  // to either execute some preparatory steps for a funding workflow, or manually
  7516  // progress a funding workflow. The primary way a funding flow is identified is
  7517  // via its pending channel ID. As an example, this method can be used to
  7518  // specify that we're expecting a funding flow for a particular pending channel
  7519  // ID, for which we need to use specific parameters. Alternatively, this can
  7520  // be used to interactively drive PSBT signing for partially complete funding
  7521  // transactions.
  7522  func (r *rpcServer) FundingStateStep(ctx context.Context,
  7523  	in *lnrpc.FundingTransitionMsg) (*lnrpc.FundingStateStepResp, error) {
  7524  
  7525  	var pendingChanID [32]byte
  7526  	switch {
  7527  
  7528  	// If this is a message to register a new shim that is an external
  7529  	// channel point, then we'll contact the wallet to register this new
  7530  	// shim. A user will use this method to register a new channel funding
  7531  	// workflow which has already been partially negotiated outside of the
  7532  	// core protocol.
  7533  	case in.GetShimRegister() != nil &&
  7534  		in.GetShimRegister().GetChanPointShim() != nil:
  7535  
  7536  		rpcShimIntent := in.GetShimRegister().GetChanPointShim()
  7537  
  7538  		// Using the rpc shim as a template, we'll construct a new
  7539  		// chanfunding.Assembler that is able to express proper
  7540  		// formulation of this expected channel.
  7541  		shimAssembler, err := newFundingShimAssembler(
  7542  			rpcShimIntent, false, r.server.cc.KeyRing,
  7543  		)
  7544  		if err != nil {
  7545  			return nil, err
  7546  		}
  7547  		req := &chanfunding.Request{
  7548  			RemoteAmt: dcrutil.Amount(rpcShimIntent.Amt),
  7549  		}
  7550  		shimIntent, err := shimAssembler.ProvisionChannel(req)
  7551  		if err != nil {
  7552  			return nil, err
  7553  		}
  7554  
  7555  		// Once we have the intent, we'll register it with the wallet.
  7556  		// Once we receive an incoming funding request that uses this
  7557  		// pending channel ID, then this shim will be dispatched in
  7558  		// place of our regular funding workflow.
  7559  		copy(pendingChanID[:], rpcShimIntent.PendingChanId)
  7560  		err = r.server.cc.Wallet.RegisterFundingIntent(
  7561  			pendingChanID, shimIntent,
  7562  		)
  7563  		if err != nil {
  7564  			return nil, err
  7565  		}
  7566  
  7567  	// There is no need to register a PSBT shim before opening the channel,
  7568  	// even though our RPC message structure allows for it. Inform the user
  7569  	// by returning a proper error instead of just doing nothing.
  7570  	case in.GetShimRegister() != nil &&
  7571  		in.GetShimRegister().GetPsbtShim() != nil:
  7572  
  7573  		return nil, fmt.Errorf("PSBT shim must only be sent when " +
  7574  			"opening a channel")
  7575  
  7576  	// If this is a transition to cancel an existing shim, then we'll pass
  7577  	// this message along to the wallet, informing it that the intent no
  7578  	// longer needs to be considered and should be cleaned up.
  7579  	case in.GetShimCancel() != nil:
  7580  		rpcsLog.Debugf("Canceling funding shim for pending_id=%x",
  7581  			in.GetShimCancel().PendingChanId)
  7582  
  7583  		copy(pendingChanID[:], in.GetShimCancel().PendingChanId)
  7584  		err := r.server.cc.Wallet.CancelFundingIntent(pendingChanID)
  7585  		if err != nil {
  7586  			return nil, err
  7587  		}
  7588  
  7589  	// If this is a transition to verify the PSBT for an existing shim,
  7590  	// we'll do so and then store the verified PSBT for later so we can
  7591  	// compare it to the final, signed one.
  7592  	case in.GetPsbtVerify() != nil:
  7593  		rpcsLog.Debugf("Verifying PSBT for pending_id=%x",
  7594  			in.GetPsbtVerify().PendingChanId)
  7595  
  7596  		copy(pendingChanID[:], in.GetPsbtVerify().PendingChanId)
  7597  		packet, err := psbt.NewFromRawBytes(
  7598  			bytes.NewReader(in.GetPsbtVerify().FundedPsbt), false,
  7599  		)
  7600  		if err != nil {
  7601  			return nil, fmt.Errorf("error parsing psbt: %v", err)
  7602  		}
  7603  
  7604  		err = r.server.cc.Wallet.PsbtFundingVerify(
  7605  			pendingChanID, packet, in.GetPsbtVerify().SkipFinalize,
  7606  		)
  7607  		if err != nil {
  7608  			return nil, err
  7609  		}
  7610  
  7611  	// If this is a transition to finalize the PSBT funding flow, we compare
  7612  	// the final PSBT to the previously verified one and if nothing
  7613  	// unexpected was changed, continue the channel opening process.
  7614  	case in.GetPsbtFinalize() != nil:
  7615  		msg := in.GetPsbtFinalize()
  7616  		rpcsLog.Debugf("Finalizing PSBT for pending_id=%x",
  7617  			msg.PendingChanId)
  7618  
  7619  		copy(pendingChanID[:], in.GetPsbtFinalize().PendingChanId)
  7620  
  7621  		var (
  7622  			packet *psbt.Packet
  7623  			rawTx  *wire.MsgTx
  7624  			err    error
  7625  		)
  7626  
  7627  		// Either the signed PSBT or the raw transaction needs to be
  7628  		// set, but not both at the same time.
  7629  		switch {
  7630  		case len(msg.SignedPsbt) > 0 && len(msg.FinalRawTx) > 0:
  7631  			return nil, fmt.Errorf("cannot set both signed PSBT " +
  7632  				"and final raw TX at the same time")
  7633  
  7634  		case len(msg.SignedPsbt) > 0:
  7635  			packet, err = psbt.NewFromRawBytes(
  7636  				bytes.NewReader(in.GetPsbtFinalize().SignedPsbt),
  7637  				false,
  7638  			)
  7639  			if err != nil {
  7640  				return nil, fmt.Errorf("error parsing psbt: %v",
  7641  					err)
  7642  			}
  7643  
  7644  		case len(msg.FinalRawTx) > 0:
  7645  			rawTx = &wire.MsgTx{}
  7646  			err = rawTx.Deserialize(bytes.NewReader(msg.FinalRawTx))
  7647  			if err != nil {
  7648  				return nil, fmt.Errorf("error parsing final "+
  7649  					"raw TX: %v", err)
  7650  			}
  7651  
  7652  		default:
  7653  			return nil, fmt.Errorf("PSBT or raw transaction to " +
  7654  				"finalize missing")
  7655  		}
  7656  
  7657  		err = r.server.cc.Wallet.PsbtFundingFinalize(
  7658  			pendingChanID, packet, rawTx,
  7659  		)
  7660  		if err != nil {
  7661  			return nil, err
  7662  		}
  7663  	}
  7664  
  7665  	// TODO(roasbeef): extend PendingChannels to also show shims
  7666  
  7667  	// TODO(roasbeef): return resulting state? also add a method to query
  7668  	// current state?
  7669  	return &lnrpc.FundingStateStepResp{}, nil
  7670  }
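
// exampleFinalizePsbtFunding is an illustrative, hypothetical client-side
// sketch (not used by the server) of the last PSBT transition handled above:
// once the funding PSBT has been fully signed externally, the client submits
// it through the PsbtFinalize branch of FundingStateStep. The oneof wrapper
// and field names are assumed to match the generated lnrpc types.
func exampleFinalizePsbtFunding(ctx context.Context,
	client lnrpc.LightningClient, pendingChanID, signedPsbt []byte) error {

	_, err := client.FundingStateStep(ctx, &lnrpc.FundingTransitionMsg{
		Trigger: &lnrpc.FundingTransitionMsg_PsbtFinalize{
			PsbtFinalize: &lnrpc.FundingPsbtFinalize{
				PendingChanId: pendingChanID,
				// Exactly one of SignedPsbt or FinalRawTx may
				// be set, mirroring the check in the handler
				// above.
				SignedPsbt: signedPsbt,
			},
		},
	})
	return err
}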
  7671  
  7672  // RegisterRPCMiddleware adds a new gRPC middleware to the interceptor chain. A
  7673  // gRPC middleware is a software component external to lnd that aims to add
  7674  // additional business logic to lnd by observing/intercepting/validating
  7675  // incoming gRPC client requests and (if needed) replacing/overwriting outgoing
  7676  // messages before they're sent to the client. When registering, the middleware
  7677  // must identify itself and indicate what custom macaroon caveats it wants to
  7678  // be responsible for. Only requests that contain a macaroon with that specific
  7679  // custom caveat are then sent to the middleware for inspection. As a security
  7680  // measure, _no_ middleware can intercept requests made with _unencumbered_
  7681  // macaroons!
  7682  func (r *rpcServer) RegisterRPCMiddleware(
  7683  	stream lnrpc.Lightning_RegisterRPCMiddlewareServer) error {
  7684  
  7685  	// This is security-critical functionality and needs to be enabled
  7686  	// explicitly by the user.
  7687  	if !r.cfg.RPCMiddleware.Enable {
  7688  		return fmt.Errorf("RPC middleware not enabled in config")
  7689  	}
  7690  
  7691  	// When registering a middleware, the first message sent from the
  7692  	// middleware must be a registration message containing its name and
  7693  	// the custom caveat it wants to register for.
  7694  	var (
  7695  		registerChan = make(chan *lnrpc.MiddlewareRegistration, 1)
  7696  		errChan      = make(chan error, 1)
  7697  	)
  7698  	ctxc, cancel := context.WithTimeout(
  7699  		stream.Context(), r.cfg.RPCMiddleware.InterceptTimeout,
  7700  	)
  7701  	defer cancel()
  7702  
  7703  	// Read the first message in a goroutine because the Recv method blocks
  7704  	// until the message arrives.
  7705  	go func() {
  7706  		msg, err := stream.Recv()
  7707  		if err != nil {
  7708  			errChan <- err
  7709  
  7710  			return
  7711  		}
  7712  
  7713  		registerChan <- msg.GetRegister()
  7714  	}()
  7715  
  7716  	// Wait for the initial message to arrive or time out if it takes too
  7717  	// long.
  7718  	var registerMsg *lnrpc.MiddlewareRegistration
  7719  	select {
  7720  	case registerMsg = <-registerChan:
  7721  		if registerMsg == nil {
  7722  			return fmt.Errorf("invalid initial middleware " +
  7723  				"registration message")
  7724  		}
  7725  
  7726  	case err := <-errChan:
  7727  		return fmt.Errorf("error receiving initial middleware "+
  7728  			"registration message: %v", err)
  7729  
  7730  	case <-ctxc.Done():
  7731  		return ctxc.Err()
  7732  
  7733  	case <-r.quit:
  7734  		return ErrServerShuttingDown
  7735  	}
  7736  
  7737  	// Make sure the registration is valid.
  7738  	const nameMinLength = 5
  7739  	if len(registerMsg.MiddlewareName) < nameMinLength {
  7740  		return fmt.Errorf("invalid middleware name, use descriptive "+
  7741  			"name of at least %d characters", nameMinLength)
  7742  	}
  7743  
  7744  	readOnly := registerMsg.ReadOnlyMode
  7745  	caveatName := registerMsg.CustomMacaroonCaveatName
  7746  	switch {
  7747  	case readOnly && len(caveatName) > 0:
  7748  		return fmt.Errorf("cannot set read-only and custom caveat " +
  7749  			"name at the same time")
  7750  
  7751  	case !readOnly && len(caveatName) < nameMinLength:
  7752  		return fmt.Errorf("need to set either custom caveat name "+
  7753  			"of at least %d characters or read-only mode",
  7754  			nameMinLength)
  7755  	}
  7756  
  7757  	middleware := rpcperms.NewMiddlewareHandler(
  7758  		registerMsg.MiddlewareName,
  7759  		caveatName, readOnly, stream.Recv, stream.Send,
  7760  		r.cfg.RPCMiddleware.InterceptTimeout,
  7761  		r.cfg.ActiveNetParams.Params, r.quit,
  7762  	)
  7763  
  7764  	// Add the RPC middleware to the interceptor chain and defer its
  7765  	// removal.
  7766  	if err := r.interceptorChain.RegisterMiddleware(middleware); err != nil {
  7767  		return fmt.Errorf("error registering middleware: %v", err)
  7768  	}
  7769  	defer r.interceptorChain.RemoveMiddleware(registerMsg.MiddlewareName)
  7770  
  7771  	return middleware.Run()
  7772  }
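
// exampleRegisterMiddleware is an illustrative, hypothetical client-side
// sketch (not used by the server) of the registration handshake described
// above: the very first message on the stream must be a MiddlewareRegistration
// naming the middleware and the custom caveat it is responsible for (or
// read-only mode). The wrapper and field names are assumed to match the
// generated lnrpc types; the names used are placeholders.
func exampleRegisterMiddleware(ctx context.Context,
	client lnrpc.LightningClient) error {

	stream, err := client.RegisterRPCMiddleware(ctx)
	if err != nil {
		return err
	}

	// Register for requests whose macaroons carry the custom caveat
	// "myapp-caveat"; both names must be at least 5 characters long.
	return stream.Send(&lnrpc.RPCMiddlewareResponse{
		MiddlewareMessage: &lnrpc.RPCMiddlewareResponse_Register{
			Register: &lnrpc.MiddlewareRegistration{
				MiddlewareName:           "myapp-middleware",
				CustomMacaroonCaveatName: "myapp-caveat",
				ReadOnlyMode:             false,
			},
		},
	})
}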
  7773  
  7774  // SendCustomMessage sends a custom peer message.
  7775  func (r *rpcServer) SendCustomMessage(ctx context.Context, req *lnrpc.SendCustomMessageRequest) (
  7776  	*lnrpc.SendCustomMessageResponse, error) {
  7777  
  7778  	peer, err := route.NewVertexFromBytes(req.Peer)
  7779  	if err != nil {
  7780  		return nil, err
  7781  	}
  7782  
  7783  	err = r.server.SendCustomMessage(
  7784  		peer, lnwire.MessageType(req.Type), req.Data,
  7785  	)
  7786  	switch {
  7787  	case err == ErrPeerNotConnected:
  7788  		return nil, status.Error(codes.NotFound, err.Error())
  7789  	case err != nil:
  7790  		return nil, err
  7791  	}
  7792  
  7793  	return &lnrpc.SendCustomMessageResponse{}, nil
  7794  }
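
// exampleSendCustomMessage is an illustrative, hypothetical client-side sketch
// (not used by the server) of SendCustomMessage: it sends an arbitrary payload
// to a peer identified by its hex-encoded public key. The message type used
// here is only an example and is assumed to fall within the custom range
// accepted by the peer message handler.
func exampleSendCustomMessage(ctx context.Context,
	client lnrpc.LightningClient, peerHex string) error {

	peerBytes, err := hex.DecodeString(peerHex)
	if err != nil {
		return err
	}

	_, err = client.SendCustomMessage(ctx, &lnrpc.SendCustomMessageRequest{
		Peer: peerBytes,
		Type: 49999, // Example value in the custom message type range.
		Data: []byte("hello"),
	})
	return err
}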
  7795  
  7796  // SubscribeCustomMessages subscribes to a stream of incoming custom peer
  7797  // messages.
  7798  func (r *rpcServer) SubscribeCustomMessages(req *lnrpc.SubscribeCustomMessagesRequest,
  7799  	server lnrpc.Lightning_SubscribeCustomMessagesServer) error {
  7800  
  7801  	client, err := r.server.SubscribeCustomMessages()
  7802  	if err != nil {
  7803  		return err
  7804  	}
  7805  	defer client.Cancel()
  7806  
  7807  	for {
  7808  		select {
  7809  		case <-client.Quit():
  7810  			return errors.New("shutdown")
  7811  
  7812  		case <-server.Context().Done():
  7813  			return server.Context().Err()
  7814  
  7815  		case update := <-client.Updates():
  7816  			customMsg := update.(*CustomMessage)
  7817  
  7818  			err := server.Send(&lnrpc.CustomMessage{
  7819  				Peer: customMsg.Peer[:],
  7820  				Data: customMsg.Msg.Data,
  7821  				Type: uint32(customMsg.Msg.Type),
  7822  			})
  7823  			if err != nil {
  7824  				return err
  7825  			}
  7826  		}
  7827  	}
  7828  }
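
// exampleReceiveCustomMessages is an illustrative, hypothetical client-side
// sketch (not used by the server) of consuming the stream exposed above: each
// received lnrpc.CustomMessage carries the sending peer, the message type and
// the raw payload. The handle callback is an assumption standing in for
// application-specific processing.
func exampleReceiveCustomMessages(ctx context.Context,
	client lnrpc.LightningClient,
	handle func(peer []byte, msgType uint32, data []byte)) error {

	stream, err := client.SubscribeCustomMessages(
		ctx, &lnrpc.SubscribeCustomMessagesRequest{},
	)
	if err != nil {
		return err
	}

	for {
		msg, err := stream.Recv()
		if err != nil {
			return err
		}

		handle(msg.Peer, msg.Type, msg.Data)
	}
}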
  7829  
  7830  // rpcInitiator returns the correct lnrpc initiator for channels where we have
  7831  // a record of which party opened the channel.
  7832  func rpcInitiator(isInitiator bool) lnrpc.Initiator {
  7833  	if isInitiator {
  7834  		return lnrpc.Initiator_INITIATOR_LOCAL
  7835  	}
  7836  
  7837  	return lnrpc.Initiator_INITIATOR_REMOTE
  7838  }