github.com/decred/dcrlnd@v0.7.6/channeldb/graph.go (about)

     1  package channeldb
     2  
     3  import (
     4  	"bytes"
     5  	"encoding/binary"
     6  	"errors"
     7  	"fmt"
     8  	"image/color"
     9  	"io"
    10  	"math"
    11  	"net"
    12  	"sort"
    13  	"sync"
    14  	"time"
    15  
    16  	"github.com/decred/dcrd/chaincfg/chainhash"
    17  	"github.com/decred/dcrd/dcrec/secp256k1/v4"
    18  	"github.com/decred/dcrd/dcrec/secp256k1/v4/ecdsa"
    19  	"github.com/decred/dcrd/dcrutil/v4"
    20  	"github.com/decred/dcrd/txscript/v4"
    21  	"github.com/decred/dcrd/wire"
    22  	"github.com/decred/dcrlnd/batch"
    23  	"github.com/decred/dcrlnd/input"
    24  	"github.com/decred/dcrlnd/kvdb"
    25  	"github.com/decred/dcrlnd/lnwire"
    26  	"github.com/decred/dcrlnd/routing/route"
    27  )
    28  
var (
	// nodeBucket is a bucket which houses all the vertices or nodes within
	// the channel graph. This bucket has a single-sub bucket which adds an
	// additional index from pubkey -> alias. Within the top-level of this
	// bucket, the key space maps a node's compressed public key to the
	// serialized information for that node. Additionally, there's a
	// special key "source" which stores the pubkey of the source node. The
	// source node is used as the starting point for all graph queries and
	// traversals. The graph is formed as a star-graph with the source node
	// at the center.
	//
	// maps: pubKey -> nodeInfo
	// maps: source -> selfPubKey
	nodeBucket = []byte("graph-node")

	// nodeUpdateIndexBucket is a sub-bucket of the nodeBucket. This bucket
	// will be used to quickly look up the "freshness" of a node's last
	// update to the network. The bucket only contains keys, and no values,
	// it's mapping:
	//
	// maps: updateTime || nodeID -> nil
	nodeUpdateIndexBucket = []byte("graph-node-update-index")

	// sourceKey is a special key that resides within the nodeBucket. The
	// sourceKey maps a key to the public key of the "self node".
	sourceKey = []byte("source")

	// aliasIndexBucket is a sub-bucket that's nested within the main
	// nodeBucket. This bucket maps the public key of a node to its
	// current alias. This bucket is provided as it can be used within a
	// future UI layer to add an additional degree of confirmation.
	aliasIndexBucket = []byte("alias")

	// edgeBucket is a bucket which houses all of the edge or channel
	// information within the channel graph. This bucket essentially acts
	// as an adjacency list, which in conjunction with a range scan, can be
	// used to iterate over all the incoming and outgoing edges for a
	// particular node. Keys in this bucket use a prefix scheme which leads
	// with the node's public key and ends with the compact edge ID.
	// For each chanID, there will be two entries within the bucket, as the
	// graph is directed: nodes may have different policies w.r.t to fees
	// for their respective directions.
	//
	// maps: pubKey || chanID -> channel edge policy for node
	edgeBucket = []byte("graph-edge")

	// unknownPolicy is represented as an empty slice. It is
	// used as the value in edgeBucket for unknown channel edge policies.
	// Unknown policies are still stored in the database to enable efficient
	// lookup of incoming channel edges.
	unknownPolicy = []byte{}

	// chanStart is an array of all zero bytes which is used to perform
	// range scans within the edgeBucket to obtain all of the outgoing
	// edges for a particular node.
	chanStart [8]byte

	// edgeIndexBucket is an index which can be used to iterate all edges
	// in the bucket, grouping them according to their in/out nodes.
	// Additionally, the items in this bucket also contain the complete
	// edge information for a channel. The edge information includes the
	// capacity of the channel, the nodes that made the channel, etc. This
	// bucket resides within the edgeBucket above. Creation of an edge
	// proceeds in two phases: first the edge is added to the edge index,
	// afterwards the edgeBucket can be updated with the latest details of
	// the edge as they are announced on the network.
	//
	// maps: chanID -> pubKey1 || pubKey2 || restofEdgeInfo
	edgeIndexBucket = []byte("edge-index")

	// edgeUpdateIndexBucket is a sub-bucket of the main edgeBucket. This
	// bucket contains an index which allows us to gauge the "freshness" of
	// a channel's last updates.
	//
	// maps: updateTime || chanID -> nil
	edgeUpdateIndexBucket = []byte("edge-update-index")

	// channelPointBucket maps a channel's full outpoint (txid:index) to
	// its short 8-byte channel ID. This bucket resides within the
	// edgeBucket above, and can be used to quickly remove an edge due to
	// the outpoint being spent, or to query for existence of a channel.
	//
	// maps: outPoint -> chanID
	channelPointBucket = []byte("chan-index")

	// zombieBucket is a sub-bucket of the main edgeBucket bucket
	// responsible for maintaining an index of zombie channels. Each entry
	// exists within the bucket as follows:
	//
	// maps: chanID -> pubKey1 || pubKey2
	//
	// The chanID represents the channel ID of the edge that is marked as a
	// zombie and is used as the key, which maps to the public keys of the
	// edge's participants.
	zombieBucket = []byte("zombie-index")

	// disabledEdgePolicyBucket is a sub-bucket of the main edgeBucket bucket
	// responsible for maintaining an index of disabled edge policies. Each
	// entry exists within the bucket as follows:
	//
	// maps: <chanID><direction> -> []byte{}
	//
	// The chanID represents the channel ID of the edge and the direction is
	// one byte representing the direction of the edge. The main purpose of
	// this index is to allow pruning disabled channels in a fast way without
	// the need to iterate all over the graph.
	disabledEdgePolicyBucket = []byte("disabled-edge-policy-index")

	// graphMetaBucket is a top-level bucket which stores various meta-data
	// related to the on-disk channel graph. Data stored in this bucket
	// includes the block to which the graph has been synced to, the total
	// number of channels, etc.
	graphMetaBucket = []byte("graph-meta")

	// pruneLogBucket is a bucket within the graphMetaBucket that stores
	// a mapping from the block height to the hash for the blocks used to
	// prune the graph.
	// Once a new block is discovered, any channels that have been closed
	// (by spending the outpoint) can safely be removed from the graph, and
	// the block is added to the prune log. We need to keep such a log for
	// the case where a reorg happens, and we must "rewind" the state of the
	// graph by removing channels that were previously confirmed. In such a
	// case we'll remove all entries from the prune log with a block height
	// that no longer exists.
	pruneLogBucket = []byte("prune-log")
)
   155  
const (
	// MaxAllowedExtraOpaqueBytes is the largest amount of opaque bytes that
	// we'll permit to be written to disk. We limit this as otherwise, it
	// would be possible for a node to create a ton of updates and slowly
	// fill our disk, and also waste bandwidth due to relaying.
	MaxAllowedExtraOpaqueBytes = 10000

	// feeRateParts is the total number of parts used to express fee rates.
	// A rate of 1e6 parts means fee rates are expressed in parts-per-million.
	feeRateParts = 1e6
)
   166  
// ChannelGraph is a persistent, on-disk graph representation of the Lightning
// Network. This struct can be used to implement path finding algorithms on top
// of, and also to update a node's view based on information received from the
// p2p network. Internally, the graph is stored using a modified adjacency list
// representation with some added object interaction possible with each
// serialized edge/node. The graph stored is directed, meaning that there are
// two edges stored for each channel: an inbound/outbound edge for each node
// pair. Nodes, edges, and edge information can all be added to the graph
// independently. Edge removal results in the deletion of all edge information
// for that edge.
type ChannelGraph struct {
	// db is the key/value backend that all graph data is persisted to.
	db kvdb.Backend

	// cacheMu guards the caches below. graphCache is the optional
	// in-memory representation of the graph and is nil when disabled.
	cacheMu     sync.RWMutex
	rejectCache *rejectCache
	chanCache   *channelCache
	graphCache  *GraphCache

	// chanScheduler and nodeScheduler batch concurrent channel and node
	// writes into shared database transactions.
	chanScheduler batch.Scheduler
	nodeScheduler batch.Scheduler
}
   188  
   189  // NewChannelGraph allocates a new ChannelGraph backed by a DB instance. The
   190  // returned instance has its own unique reject cache and channel cache.
   191  func NewChannelGraph(db kvdb.Backend, rejectCacheSize, chanCacheSize int,
   192  	batchCommitInterval time.Duration, preAllocCacheNumNodes int,
   193  	useGraphCache bool) (*ChannelGraph, error) {
   194  
   195  	if err := initChannelGraph(db); err != nil {
   196  		return nil, err
   197  	}
   198  
   199  	g := &ChannelGraph{
   200  		db:          db,
   201  		rejectCache: newRejectCache(rejectCacheSize),
   202  		chanCache:   newChannelCache(chanCacheSize),
   203  	}
   204  	g.chanScheduler = batch.NewTimeScheduler(
   205  		db, &g.cacheMu, batchCommitInterval,
   206  	)
   207  	g.nodeScheduler = batch.NewTimeScheduler(
   208  		db, nil, batchCommitInterval,
   209  	)
   210  
   211  	// The graph cache can be turned off (e.g. for mobile users) for a
   212  	// speed/memory usage tradeoff.
   213  	if useGraphCache {
   214  		g.graphCache = NewGraphCache(preAllocCacheNumNodes)
   215  		startTime := time.Now()
   216  		log.Debugf("Populating in-memory channel graph, this might " +
   217  			"take a while...")
   218  
   219  		err := g.ForEachNodeCacheable(
   220  			func(tx kvdb.RTx, node GraphCacheNode) error {
   221  				g.graphCache.AddNodeFeatures(node)
   222  
   223  				return nil
   224  			},
   225  		)
   226  		if err != nil {
   227  			return nil, err
   228  		}
   229  
   230  		err = g.ForEachChannel(func(info *ChannelEdgeInfo,
   231  			policy1, policy2 *ChannelEdgePolicy) error {
   232  
   233  			g.graphCache.AddChannel(info, policy1, policy2)
   234  
   235  			return nil
   236  		})
   237  		if err != nil {
   238  			return nil, err
   239  		}
   240  
   241  		log.Debugf("Finished populating in-memory channel graph (took "+
   242  			"%v, %s)", time.Since(startTime), g.graphCache.Stats())
   243  	}
   244  
   245  	return g, nil
   246  }
   247  
// channelMapKey is the key structure used for storing channel edge policies.
type channelMapKey struct {
	// nodeKey is the 33-byte compressed public key of the node that owns
	// this directed policy.
	nodeKey route.Vertex

	// chanID is the 8-byte channel ID portion of the edge key.
	chanID [8]byte
}
   253  
   254  // getChannelMap loads all channel edge policies from the database and stores
   255  // them in a map.
   256  func (c *ChannelGraph) getChannelMap(edges kvdb.RBucket) (
   257  	map[channelMapKey]*ChannelEdgePolicy, error) {
   258  
   259  	// Create a map to store all channel edge policies.
   260  	channelMap := make(map[channelMapKey]*ChannelEdgePolicy)
   261  
   262  	err := kvdb.ForAll(edges, func(k, edgeBytes []byte) error {
   263  		// Skip embedded buckets.
   264  		if bytes.Equal(k, edgeIndexBucket) ||
   265  			bytes.Equal(k, edgeUpdateIndexBucket) ||
   266  			bytes.Equal(k, zombieBucket) ||
   267  			bytes.Equal(k, disabledEdgePolicyBucket) ||
   268  			bytes.Equal(k, channelPointBucket) {
   269  
   270  			return nil
   271  		}
   272  
   273  		// Validate key length.
   274  		if len(k) != 33+8 {
   275  			return fmt.Errorf("invalid edge key %x encountered", k)
   276  		}
   277  
   278  		var key channelMapKey
   279  		copy(key.nodeKey[:], k[:33])
   280  		copy(key.chanID[:], k[33:])
   281  
   282  		// No need to deserialize unknown policy.
   283  		if bytes.Equal(edgeBytes, unknownPolicy) {
   284  			return nil
   285  		}
   286  
   287  		edgeReader := bytes.NewReader(edgeBytes)
   288  		edge, err := deserializeChanEdgePolicyRaw(
   289  			edgeReader,
   290  		)
   291  
   292  		switch {
   293  		// If the db policy was missing an expected optional field, we
   294  		// return nil as if the policy was unknown.
   295  		case err == ErrEdgePolicyOptionalFieldNotFound:
   296  			return nil
   297  
   298  		case err != nil:
   299  			return err
   300  		}
   301  
   302  		channelMap[key] = edge
   303  
   304  		return nil
   305  	})
   306  	if err != nil {
   307  		return nil, err
   308  	}
   309  
   310  	return channelMap, nil
   311  }
   312  
// graphTopLevelBuckets lists every top-level bucket that makes up the
// persisted channel graph. It is used when creating (initChannelGraph) and
// deleting (Wipe) the graph's on-disk state.
var graphTopLevelBuckets = [][]byte{
	nodeBucket,
	edgeBucket,
	edgeIndexBucket,
	graphMetaBucket,
}
   319  
   320  // Wipe completely deletes all saved state within all used buckets within the
   321  // database. The deletion is done in a single transaction, therefore this
   322  // operation is fully atomic.
   323  func (c *ChannelGraph) Wipe() error {
   324  	err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
   325  		for _, tlb := range graphTopLevelBuckets {
   326  			err := tx.DeleteTopLevelBucket(tlb)
   327  			if err != nil && err != kvdb.ErrBucketNotFound {
   328  				return err
   329  			}
   330  		}
   331  		return nil
   332  	}, func() {})
   333  	if err != nil {
   334  		return err
   335  	}
   336  
   337  	return initChannelGraph(c.db)
   338  }
   339  
   340  // createChannelDB creates and initializes a fresh version of channeldb. In
   341  // the case that the target path has not yet been created or doesn't yet exist,
   342  // then the path is created. Additionally, all required top-level buckets used
   343  // within the database are created.
   344  func initChannelGraph(db kvdb.Backend) error {
   345  	err := kvdb.Update(db, func(tx kvdb.RwTx) error {
   346  		for _, tlb := range graphTopLevelBuckets {
   347  			if _, err := tx.CreateTopLevelBucket(tlb); err != nil {
   348  				return err
   349  			}
   350  		}
   351  
   352  		nodes := tx.ReadWriteBucket(nodeBucket)
   353  		_, err := nodes.CreateBucketIfNotExists(aliasIndexBucket)
   354  		if err != nil {
   355  			return err
   356  		}
   357  		_, err = nodes.CreateBucketIfNotExists(nodeUpdateIndexBucket)
   358  		if err != nil {
   359  			return err
   360  		}
   361  
   362  		edges := tx.ReadWriteBucket(edgeBucket)
   363  		_, err = edges.CreateBucketIfNotExists(edgeIndexBucket)
   364  		if err != nil {
   365  			return err
   366  		}
   367  		_, err = edges.CreateBucketIfNotExists(edgeUpdateIndexBucket)
   368  		if err != nil {
   369  			return err
   370  		}
   371  		_, err = edges.CreateBucketIfNotExists(channelPointBucket)
   372  		if err != nil {
   373  			return err
   374  		}
   375  		_, err = edges.CreateBucketIfNotExists(zombieBucket)
   376  		if err != nil {
   377  			return err
   378  		}
   379  
   380  		graphMeta := tx.ReadWriteBucket(graphMetaBucket)
   381  		_, err = graphMeta.CreateBucketIfNotExists(pruneLogBucket)
   382  		return err
   383  	}, func() {})
   384  	if err != nil {
   385  		return fmt.Errorf("unable to create new channel graph: %v", err)
   386  	}
   387  
   388  	return nil
   389  }
   390  
   391  // NewPathFindTx returns a new read transaction that can be used for a single
   392  // path finding session. Will return nil if the graph cache is enabled.
   393  func (c *ChannelGraph) NewPathFindTx() (kvdb.RTx, error) {
   394  	if c.graphCache != nil {
   395  		return nil, nil
   396  	}
   397  
   398  	return c.db.BeginReadTx()
   399  }
   400  
   401  // ForEachChannel iterates through all the channel edges stored within the
   402  // graph and invokes the passed callback for each edge. The callback takes two
   403  // edges as since this is a directed graph, both the in/out edges are visited.
   404  // If the callback returns an error, then the transaction is aborted and the
   405  // iteration stops early.
   406  //
   407  // NOTE: If an edge can't be found, or wasn't advertised, then a nil pointer
   408  // for that particular channel edge routing policy will be passed into the
   409  // callback.
   410  func (c *ChannelGraph) ForEachChannel(cb func(*ChannelEdgeInfo,
   411  	*ChannelEdgePolicy, *ChannelEdgePolicy) error) error {
   412  
   413  	return c.db.View(func(tx kvdb.RTx) error {
   414  		edges := tx.ReadBucket(edgeBucket)
   415  		if edges == nil {
   416  			return ErrGraphNoEdgesFound
   417  		}
   418  
   419  		// First, load all edges in memory indexed by node and channel
   420  		// id.
   421  		channelMap, err := c.getChannelMap(edges)
   422  		if err != nil {
   423  			return err
   424  		}
   425  
   426  		edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
   427  		if edgeIndex == nil {
   428  			return ErrGraphNoEdgesFound
   429  		}
   430  
   431  		// Load edge index, recombine each channel with the policies
   432  		// loaded above and invoke the callback.
   433  		return kvdb.ForAll(edgeIndex, func(k, edgeInfoBytes []byte) error {
   434  			var chanID [8]byte
   435  			copy(chanID[:], k)
   436  
   437  			edgeInfoReader := bytes.NewReader(edgeInfoBytes)
   438  			info, err := deserializeChanEdgeInfo(edgeInfoReader)
   439  			if err != nil {
   440  				return err
   441  			}
   442  
   443  			policy1 := channelMap[channelMapKey{
   444  				nodeKey: info.NodeKey1Bytes,
   445  				chanID:  chanID,
   446  			}]
   447  
   448  			policy2 := channelMap[channelMapKey{
   449  				nodeKey: info.NodeKey2Bytes,
   450  				chanID:  chanID,
   451  			}]
   452  
   453  			return cb(&info, policy1, policy2)
   454  		})
   455  	}, func() {})
   456  }
   457  
   458  // ForEachNodeChannel iterates through all channels of a given node, executing the
   459  // passed callback with an edge info structure and the policies of each end
   460  // of the channel. The first edge policy is the outgoing edge *to* the
   461  // connecting node, while the second is the incoming edge *from* the
   462  // connecting node. If the callback returns an error, then the iteration is
   463  // halted with the error propagated back up to the caller.
   464  //
   465  // Unknown policies are passed into the callback as nil values.
   466  func (c *ChannelGraph) ForEachNodeChannel(tx kvdb.RTx, node route.Vertex,
   467  	cb func(channel *DirectedChannel) error) error {
   468  
   469  	if c.graphCache != nil {
   470  		return c.graphCache.ForEachChannel(node, cb)
   471  	}
   472  
   473  	// Fallback that uses the database.
   474  	toNodeCallback := func() route.Vertex {
   475  		return node
   476  	}
   477  	toNodeFeatures, err := c.FetchNodeFeatures(node)
   478  	if err != nil {
   479  		return err
   480  	}
   481  
   482  	dbCallback := func(tx kvdb.RTx, e *ChannelEdgeInfo, p1,
   483  		p2 *ChannelEdgePolicy) error {
   484  
   485  		var cachedInPolicy *CachedEdgePolicy
   486  		if p2 != nil {
   487  			cachedInPolicy = NewCachedPolicy(p2)
   488  			cachedInPolicy.ToNodePubKey = toNodeCallback
   489  			cachedInPolicy.ToNodeFeatures = toNodeFeatures
   490  		}
   491  
   492  		directedChannel := &DirectedChannel{
   493  			ChannelID:    e.ChannelID,
   494  			IsNode1:      node == e.NodeKey1Bytes,
   495  			OtherNode:    e.NodeKey2Bytes,
   496  			Capacity:     e.Capacity,
   497  			OutPolicySet: p1 != nil,
   498  			InPolicy:     cachedInPolicy,
   499  		}
   500  
   501  		if node == e.NodeKey2Bytes {
   502  			directedChannel.OtherNode = e.NodeKey1Bytes
   503  		}
   504  
   505  		return cb(directedChannel)
   506  	}
   507  	return nodeTraversal(tx, node[:], c.db, dbCallback)
   508  }
   509  
   510  // FetchNodeFeatures returns the features of a given node. If no features are
   511  // known for the node, an empty feature vector is returned.
   512  func (c *ChannelGraph) FetchNodeFeatures(
   513  	node route.Vertex) (*lnwire.FeatureVector, error) {
   514  
   515  	if c.graphCache != nil {
   516  		return c.graphCache.GetFeatures(node), nil
   517  	}
   518  
   519  	// Fallback that uses the database.
   520  	targetNode, err := c.FetchLightningNode(node)
   521  	switch err {
   522  
   523  	// If the node exists and has features, return them directly.
   524  	case nil:
   525  		return targetNode.Features, nil
   526  
   527  	// If we couldn't find a node announcement, populate a blank feature
   528  	// vector.
   529  	case ErrGraphNodeNotFound:
   530  		return lnwire.EmptyFeatureVector(), nil
   531  
   532  	// Otherwise, bubble the error up.
   533  	default:
   534  		return nil, err
   535  	}
   536  }
   537  
   538  // ForEachNodeCached is similar to ForEachNode, but it utilizes the channel
   539  // graph cache instead. Note that this doesn't return all the information the
   540  // regular ForEachNode method does.
   541  //
   542  // NOTE: The callback contents MUST not be modified.
   543  func (c *ChannelGraph) ForEachNodeCached(cb func(node route.Vertex,
   544  	chans map[uint64]*DirectedChannel) error) error {
   545  
   546  	if c.graphCache != nil {
   547  		return c.graphCache.ForEachNode(cb)
   548  	}
   549  
   550  	// Otherwise call back to a version that uses the database directly.
   551  	// We'll iterate over each node, then the set of channels for each
   552  	// node, and construct a similar callback functiopn signature as the
   553  	// main funcotin expects.
   554  	return c.ForEachNode(func(tx kvdb.RTx, node *LightningNode) error {
   555  		channels := make(map[uint64]*DirectedChannel)
   556  
   557  		err := node.ForEachChannel(tx, func(tx kvdb.RTx,
   558  			e *ChannelEdgeInfo, p1 *ChannelEdgePolicy,
   559  			p2 *ChannelEdgePolicy) error {
   560  
   561  			toNodeCallback := func() route.Vertex {
   562  				return node.PubKeyBytes
   563  			}
   564  			toNodeFeatures, err := c.FetchNodeFeatures(
   565  				node.PubKeyBytes,
   566  			)
   567  			if err != nil {
   568  				return err
   569  			}
   570  
   571  			var cachedInPolicy *CachedEdgePolicy
   572  			if p2 != nil {
   573  				cachedInPolicy := NewCachedPolicy(p2)
   574  				cachedInPolicy.ToNodePubKey = toNodeCallback
   575  				cachedInPolicy.ToNodeFeatures = toNodeFeatures
   576  			}
   577  
   578  			directedChannel := &DirectedChannel{
   579  				ChannelID:    e.ChannelID,
   580  				IsNode1:      node.PubKeyBytes == e.NodeKey1Bytes,
   581  				OtherNode:    e.NodeKey2Bytes,
   582  				Capacity:     e.Capacity,
   583  				OutPolicySet: p1 != nil,
   584  				InPolicy:     cachedInPolicy,
   585  			}
   586  
   587  			if node.PubKeyBytes == e.NodeKey2Bytes {
   588  				directedChannel.OtherNode = e.NodeKey1Bytes
   589  			}
   590  
   591  			channels[e.ChannelID] = directedChannel
   592  
   593  			return nil
   594  
   595  		})
   596  		if err != nil {
   597  			return err
   598  		}
   599  
   600  		return cb(node.PubKeyBytes, channels)
   601  	})
   602  }
   603  
   604  // DisabledChannelIDs returns the channel ids of disabled channels.
   605  // A channel is disabled when two of the associated ChanelEdgePolicies
   606  // have their disabled bit on.
   607  func (c *ChannelGraph) DisabledChannelIDs() ([]uint64, error) {
   608  	var disabledChanIDs []uint64
   609  	var chanEdgeFound map[uint64]struct{}
   610  
   611  	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
   612  		edges := tx.ReadBucket(edgeBucket)
   613  		if edges == nil {
   614  			return ErrGraphNoEdgesFound
   615  		}
   616  
   617  		disabledEdgePolicyIndex := edges.NestedReadBucket(
   618  			disabledEdgePolicyBucket,
   619  		)
   620  		if disabledEdgePolicyIndex == nil {
   621  			return nil
   622  		}
   623  
   624  		// We iterate over all disabled policies and we add each channel that
   625  		// has more than one disabled policy to disabledChanIDs array.
   626  		return disabledEdgePolicyIndex.ForEach(func(k, v []byte) error {
   627  			chanID := byteOrder.Uint64(k[:8])
   628  			_, edgeFound := chanEdgeFound[chanID]
   629  			if edgeFound {
   630  				delete(chanEdgeFound, chanID)
   631  				disabledChanIDs = append(disabledChanIDs, chanID)
   632  				return nil
   633  			}
   634  
   635  			chanEdgeFound[chanID] = struct{}{}
   636  			return nil
   637  		})
   638  	}, func() {
   639  		disabledChanIDs = nil
   640  		chanEdgeFound = make(map[uint64]struct{})
   641  	})
   642  	if err != nil {
   643  		return nil, err
   644  	}
   645  
   646  	return disabledChanIDs, nil
   647  }
   648  
   649  // ForEachNode iterates through all the stored vertices/nodes in the graph,
   650  // executing the passed callback with each node encountered. If the callback
   651  // returns an error, then the transaction is aborted and the iteration stops
   652  // early.
   653  //
   654  // TODO(roasbeef): add iterator interface to allow for memory efficient graph
   655  // traversal when graph gets mega
   656  func (c *ChannelGraph) ForEachNode(cb func(kvdb.RTx, *LightningNode) error) error { // nolint:interfacer
   657  	traversal := func(tx kvdb.RTx) error {
   658  		// First grab the nodes bucket which stores the mapping from
   659  		// pubKey to node information.
   660  		nodes := tx.ReadBucket(nodeBucket)
   661  		if nodes == nil {
   662  			return ErrGraphNotFound
   663  		}
   664  
   665  		return nodes.ForEach(func(pubKey, nodeBytes []byte) error {
   666  			// If this is the source key, then we skip this
   667  			// iteration as the value for this key is a pubKey
   668  			// rather than raw node information.
   669  			if bytes.Equal(pubKey, sourceKey) || len(pubKey) != 33 {
   670  				return nil
   671  			}
   672  
   673  			nodeReader := bytes.NewReader(nodeBytes)
   674  			node, err := deserializeLightningNode(nodeReader)
   675  			if err != nil {
   676  				return err
   677  			}
   678  			node.db = c.db
   679  
   680  			// Execute the callback, the transaction will abort if
   681  			// this returns an error.
   682  			return cb(tx, &node)
   683  		})
   684  	}
   685  
   686  	return kvdb.View(c.db, traversal, func() {})
   687  }
   688  
   689  // ForEachNodeCacheable iterates through all the stored vertices/nodes in the
   690  // graph, executing the passed callback with each node encountered. If the
   691  // callback returns an error, then the transaction is aborted and the iteration
   692  // stops early.
   693  func (c *ChannelGraph) ForEachNodeCacheable(cb func(kvdb.RTx,
   694  	GraphCacheNode) error) error {
   695  
   696  	traversal := func(tx kvdb.RTx) error {
   697  		// First grab the nodes bucket which stores the mapping from
   698  		// pubKey to node information.
   699  		nodes := tx.ReadBucket(nodeBucket)
   700  		if nodes == nil {
   701  			return ErrGraphNotFound
   702  		}
   703  
   704  		return nodes.ForEach(func(pubKey, nodeBytes []byte) error {
   705  			// If this is the source key, then we skip this
   706  			// iteration as the value for this key is a pubKey
   707  			// rather than raw node information.
   708  			if bytes.Equal(pubKey, sourceKey) || len(pubKey) != 33 {
   709  				return nil
   710  			}
   711  
   712  			nodeReader := bytes.NewReader(nodeBytes)
   713  			cacheableNode, err := deserializeLightningNodeCacheable(
   714  				nodeReader,
   715  			)
   716  			if err != nil {
   717  				return err
   718  			}
   719  
   720  			// Execute the callback, the transaction will abort if
   721  			// this returns an error.
   722  			return cb(tx, cacheableNode)
   723  		})
   724  	}
   725  
   726  	return kvdb.View(c.db, traversal, func() {})
   727  }
   728  
   729  // SourceNode returns the source node of the graph. The source node is treated
   730  // as the center node within a star-graph. This method may be used to kick off
   731  // a path finding algorithm in order to explore the reachability of another
   732  // node based off the source node.
   733  func (c *ChannelGraph) SourceNode() (*LightningNode, error) {
   734  	var source *LightningNode
   735  	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
   736  		// First grab the nodes bucket which stores the mapping from
   737  		// pubKey to node information.
   738  		nodes := tx.ReadBucket(nodeBucket)
   739  		if nodes == nil {
   740  			return ErrGraphNotFound
   741  		}
   742  
   743  		node, err := c.sourceNode(nodes)
   744  		if err != nil {
   745  			return err
   746  		}
   747  		source = node
   748  
   749  		return nil
   750  	}, func() {
   751  		source = nil
   752  	})
   753  	if err != nil {
   754  		return nil, err
   755  	}
   756  
   757  	return source, nil
   758  }
   759  
   760  // sourceNode uses an existing database transaction and returns the source node
   761  // of the graph. The source node is treated as the center node within a
   762  // star-graph. This method may be used to kick off a path finding algorithm in
   763  // order to explore the reachability of another node based off the source node.
   764  func (c *ChannelGraph) sourceNode(nodes kvdb.RBucket) (*LightningNode, error) {
   765  	selfPub := nodes.Get(sourceKey)
   766  	if selfPub == nil {
   767  		return nil, ErrSourceNodeNotSet
   768  	}
   769  
   770  	// With the pubKey of the source node retrieved, we're able to
   771  	// fetch the full node information.
   772  	node, err := fetchLightningNode(nodes, selfPub)
   773  	if err != nil {
   774  		return nil, err
   775  	}
   776  	node.db = c.db
   777  
   778  	return &node, nil
   779  }
   780  
   781  // SetSourceNode sets the source node within the graph database. The source
   782  // node is to be used as the center of a star-graph within path finding
   783  // algorithms.
   784  func (c *ChannelGraph) SetSourceNode(node *LightningNode) error {
   785  	nodePubBytes := node.PubKeyBytes[:]
   786  
   787  	return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
   788  		// First grab the nodes bucket which stores the mapping from
   789  		// pubKey to node information.
   790  		nodes, err := tx.CreateTopLevelBucket(nodeBucket)
   791  		if err != nil {
   792  			return err
   793  		}
   794  
   795  		// Next we create the mapping from source to the targeted
   796  		// public key.
   797  		if err := nodes.Put(sourceKey, nodePubBytes); err != nil {
   798  			return err
   799  		}
   800  
   801  		// Finally, we commit the information of the lightning node
   802  		// itself.
   803  		return addLightningNode(tx, node)
   804  	}, func() {})
   805  }
   806  
   807  // AddLightningNode adds a vertex/node to the graph database. If the node is not
   808  // in the database from before, this will add a new, unconnected one to the
   809  // graph. If it is present from before, this will update that node's
   810  // information. Note that this method is expected to only be called to update an
   811  // already present node from a node announcement, or to insert a node found in a
   812  // channel update.
   813  //
   814  // TODO(roasbeef): also need sig of announcement
   815  func (c *ChannelGraph) AddLightningNode(node *LightningNode,
   816  	op ...batch.SchedulerOption) error {
   817  
   818  	r := &batch.Request{
   819  		Update: func(tx kvdb.RwTx) error {
   820  			if c.graphCache != nil {
   821  				cNode := newGraphCacheNode(
   822  					node.PubKeyBytes, node.Features,
   823  				)
   824  				err := c.graphCache.AddNode(tx, cNode)
   825  				if err != nil {
   826  					return err
   827  				}
   828  			}
   829  
   830  			return addLightningNode(tx, node)
   831  		},
   832  	}
   833  
   834  	for _, f := range op {
   835  		f(r)
   836  	}
   837  
   838  	return c.nodeScheduler.Execute(r)
   839  }
   840  
   841  func addLightningNode(tx kvdb.RwTx, node *LightningNode) error {
   842  	nodes, err := tx.CreateTopLevelBucket(nodeBucket)
   843  	if err != nil {
   844  		return err
   845  	}
   846  
   847  	aliases, err := nodes.CreateBucketIfNotExists(aliasIndexBucket)
   848  	if err != nil {
   849  		return err
   850  	}
   851  
   852  	updateIndex, err := nodes.CreateBucketIfNotExists(
   853  		nodeUpdateIndexBucket,
   854  	)
   855  	if err != nil {
   856  		return err
   857  	}
   858  
   859  	return putLightningNode(nodes, aliases, updateIndex, node)
   860  }
   861  
   862  // LookupAlias attempts to return the alias as advertised by the target node.
   863  // TODO(roasbeef): currently assumes that aliases are unique...
   864  func (c *ChannelGraph) LookupAlias(pub *secp256k1.PublicKey) (string, error) {
   865  	var alias string
   866  
   867  	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
   868  		nodes := tx.ReadBucket(nodeBucket)
   869  		if nodes == nil {
   870  			return ErrGraphNodesNotFound
   871  		}
   872  
   873  		aliases := nodes.NestedReadBucket(aliasIndexBucket)
   874  		if aliases == nil {
   875  			return ErrGraphNodesNotFound
   876  		}
   877  
   878  		nodePub := pub.SerializeCompressed()
   879  		a := aliases.Get(nodePub)
   880  		if a == nil {
   881  			return ErrNodeAliasNotFound
   882  		}
   883  
   884  		// TODO(roasbeef): should actually be using the utf-8
   885  		// package...
   886  		alias = string(a)
   887  		return nil
   888  	}, func() {
   889  		alias = ""
   890  	})
   891  	if err != nil {
   892  		return "", err
   893  	}
   894  
   895  	return alias, nil
   896  }
   897  
   898  // DeleteLightningNode starts a new database transaction to remove a vertex/node
   899  // from the database according to the node's public key.
   900  func (c *ChannelGraph) DeleteLightningNode(nodePub route.Vertex) error {
   901  	// TODO(roasbeef): ensure dangling edges are removed...
   902  	return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
   903  		nodes := tx.ReadWriteBucket(nodeBucket)
   904  		if nodes == nil {
   905  			return ErrGraphNodeNotFound
   906  		}
   907  
   908  		if c.graphCache != nil {
   909  			c.graphCache.RemoveNode(nodePub)
   910  		}
   911  
   912  		return c.deleteLightningNode(nodes, nodePub[:])
   913  	}, func() {})
   914  }
   915  
   916  // deleteLightningNode uses an existing database transaction to remove a
   917  // vertex/node from the database according to the node's public key.
   918  func (c *ChannelGraph) deleteLightningNode(nodes kvdb.RwBucket,
   919  	compressedPubKey []byte) error {
   920  
   921  	aliases := nodes.NestedReadWriteBucket(aliasIndexBucket)
   922  	if aliases == nil {
   923  		return ErrGraphNodesNotFound
   924  	}
   925  
   926  	if err := aliases.Delete(compressedPubKey); err != nil {
   927  		return err
   928  	}
   929  
   930  	// Before we delete the node, we'll fetch its current state so we can
   931  	// determine when its last update was to clear out the node update
   932  	// index.
   933  	node, err := fetchLightningNode(nodes, compressedPubKey)
   934  	if err != nil {
   935  		return err
   936  	}
   937  
   938  	if err := nodes.Delete(compressedPubKey); err != nil {
   939  
   940  		return err
   941  	}
   942  
   943  	// Finally, we'll delete the index entry for the node within the
   944  	// nodeUpdateIndexBucket as this node is no longer active, so we don't
   945  	// need to track its last update.
   946  	nodeUpdateIndex := nodes.NestedReadWriteBucket(nodeUpdateIndexBucket)
   947  	if nodeUpdateIndex == nil {
   948  		return ErrGraphNodesNotFound
   949  	}
   950  
   951  	// In order to delete the entry, we'll need to reconstruct the key for
   952  	// its last update.
   953  	updateUnix := uint64(node.LastUpdate.Unix())
   954  	var indexKey [8 + 33]byte
   955  	byteOrder.PutUint64(indexKey[:8], updateUnix)
   956  	copy(indexKey[8:], compressedPubKey)
   957  
   958  	return nodeUpdateIndex.Delete(indexKey[:])
   959  }
   960  
   961  // AddChannelEdge adds a new (undirected, blank) edge to the graph database. An
   962  // undirected edge from the two target nodes are created. The information stored
   963  // denotes the static attributes of the channel, such as the channelID, the keys
   964  // involved in creation of the channel, and the set of features that the channel
   965  // supports. The chanPoint and chanID are used to uniquely identify the edge
   966  // globally within the database.
   967  func (c *ChannelGraph) AddChannelEdge(edge *ChannelEdgeInfo,
   968  	op ...batch.SchedulerOption) error {
   969  
   970  	var alreadyExists bool
   971  	r := &batch.Request{
   972  		Reset: func() {
   973  			alreadyExists = false
   974  		},
   975  		Update: func(tx kvdb.RwTx) error {
   976  			err := c.addChannelEdge(tx, edge)
   977  
   978  			// Silence ErrEdgeAlreadyExist so that the batch can
   979  			// succeed, but propagate the error via local state.
   980  			if err == ErrEdgeAlreadyExist {
   981  				alreadyExists = true
   982  				return nil
   983  			}
   984  
   985  			return err
   986  		},
   987  		OnCommit: func(err error) error {
   988  			switch {
   989  			case err != nil:
   990  				return err
   991  			case alreadyExists:
   992  				return ErrEdgeAlreadyExist
   993  			default:
   994  				c.rejectCache.remove(edge.ChannelID)
   995  				c.chanCache.remove(edge.ChannelID)
   996  				return nil
   997  			}
   998  		},
   999  	}
  1000  
  1001  	for _, f := range op {
  1002  		f(r)
  1003  	}
  1004  
  1005  	return c.chanScheduler.Execute(r)
  1006  }
  1007  
  1008  // addChannelEdge is the private form of AddChannelEdge that allows callers to
  1009  // utilize an existing db transaction.
  1010  func (c *ChannelGraph) addChannelEdge(tx kvdb.RwTx, edge *ChannelEdgeInfo) error {
  1011  	// Construct the channel's primary key which is the 8-byte channel ID.
  1012  	var chanKey [8]byte
  1013  	binary.BigEndian.PutUint64(chanKey[:], edge.ChannelID)
  1014  
  1015  	nodes, err := tx.CreateTopLevelBucket(nodeBucket)
  1016  	if err != nil {
  1017  		return err
  1018  	}
  1019  	edges, err := tx.CreateTopLevelBucket(edgeBucket)
  1020  	if err != nil {
  1021  		return err
  1022  	}
  1023  	edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket)
  1024  	if err != nil {
  1025  		return err
  1026  	}
  1027  	chanIndex, err := edges.CreateBucketIfNotExists(channelPointBucket)
  1028  	if err != nil {
  1029  		return err
  1030  	}
  1031  
  1032  	// First, attempt to check if this edge has already been created. If
  1033  	// so, then we can exit early as this method is meant to be idempotent.
  1034  	if edgeInfo := edgeIndex.Get(chanKey[:]); edgeInfo != nil {
  1035  		return ErrEdgeAlreadyExist
  1036  	}
  1037  
  1038  	if c.graphCache != nil {
  1039  		c.graphCache.AddChannel(edge, nil, nil)
  1040  	}
  1041  
  1042  	// Before we insert the channel into the database, we'll ensure that
  1043  	// both nodes already exist in the channel graph. If either node
  1044  	// doesn't, then we'll insert a "shell" node that just includes its
  1045  	// public key, so subsequent validation and queries can work properly.
  1046  	_, node1Err := fetchLightningNode(nodes, edge.NodeKey1Bytes[:])
  1047  	switch {
  1048  	case node1Err == ErrGraphNodeNotFound:
  1049  		node1Shell := LightningNode{
  1050  			PubKeyBytes:          edge.NodeKey1Bytes,
  1051  			HaveNodeAnnouncement: false,
  1052  		}
  1053  		err := addLightningNode(tx, &node1Shell)
  1054  		if err != nil {
  1055  			return fmt.Errorf("unable to create shell node "+
  1056  				"for: %x", edge.NodeKey1Bytes)
  1057  
  1058  		}
  1059  	case node1Err != nil:
  1060  		return err
  1061  	}
  1062  
  1063  	_, node2Err := fetchLightningNode(nodes, edge.NodeKey2Bytes[:])
  1064  	switch {
  1065  	case node2Err == ErrGraphNodeNotFound:
  1066  		node2Shell := LightningNode{
  1067  			PubKeyBytes:          edge.NodeKey2Bytes,
  1068  			HaveNodeAnnouncement: false,
  1069  		}
  1070  		err := addLightningNode(tx, &node2Shell)
  1071  		if err != nil {
  1072  			return fmt.Errorf("unable to create shell node "+
  1073  				"for: %x", edge.NodeKey2Bytes)
  1074  
  1075  		}
  1076  	case node2Err != nil:
  1077  		return err
  1078  	}
  1079  
  1080  	// If the edge hasn't been created yet, then we'll first add it to the
  1081  	// edge index in order to associate the edge between two nodes and also
  1082  	// store the static components of the channel.
  1083  	if err := putChanEdgeInfo(edgeIndex, edge, chanKey); err != nil {
  1084  		return err
  1085  	}
  1086  
  1087  	// Mark edge policies for both sides as unknown. This is to enable
  1088  	// efficient incoming channel lookup for a node.
  1089  	for _, key := range []*[33]byte{&edge.NodeKey1Bytes,
  1090  		&edge.NodeKey2Bytes} {
  1091  
  1092  		err := putChanEdgePolicyUnknown(edges, edge.ChannelID,
  1093  			key[:])
  1094  		if err != nil {
  1095  			return err
  1096  		}
  1097  	}
  1098  
  1099  	// Finally we add it to the channel index which maps channel points
  1100  	// (outpoints) to the shorter channel ID's.
  1101  	var b bytes.Buffer
  1102  	if err := writeOutpoint(&b, &edge.ChannelPoint); err != nil {
  1103  		return err
  1104  	}
  1105  	return chanIndex.Put(b.Bytes(), chanKey[:])
  1106  }
  1107  
// HasChannelEdge returns true if the database knows of a channel edge with the
// passed channel ID, and false otherwise. If an edge with that ID is found
// within the graph, then two time stamps representing the last time the edge
// was updated for both directed edges are returned along with the boolean. If
// it is not found, then the zombie index is checked and its result is returned
// as the second boolean.
func (c *ChannelGraph) HasChannelEdge(
	chanID uint64) (time.Time, time.Time, bool, bool, error) {

	var (
		upd1Time time.Time
		upd2Time time.Time
		exists   bool
		isZombie bool
	)

	// We'll query the cache with the shared lock held to allow multiple
	// readers to access values in the cache concurrently if they exist.
	c.cacheMu.RLock()
	if entry, ok := c.rejectCache.get(chanID); ok {
		c.cacheMu.RUnlock()
		upd1Time = time.Unix(entry.upd1Time, 0)
		upd2Time = time.Unix(entry.upd2Time, 0)
		exists, isZombie = entry.flags.unpack()
		return upd1Time, upd2Time, exists, isZombie, nil
	}
	c.cacheMu.RUnlock()

	// Cache miss: take the exclusive lock for the remainder so that the
	// DB read and cache insert below happen atomically w.r.t. other
	// writers of the reject cache.
	c.cacheMu.Lock()
	defer c.cacheMu.Unlock()

	// The item was not found with the shared lock, so we'll acquire the
	// exclusive lock and check the cache again in case another method added
	// the entry to the cache while no lock was held.
	if entry, ok := c.rejectCache.get(chanID); ok {
		upd1Time = time.Unix(entry.upd1Time, 0)
		upd2Time = time.Unix(entry.upd2Time, 0)
		exists, isZombie = entry.flags.unpack()
		return upd1Time, upd2Time, exists, isZombie, nil
	}

	// Still not cached, so consult the database directly.
	if err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrGraphNoEdgesFound
		}

		var channelID [8]byte
		byteOrder.PutUint64(channelID[:], chanID)

		// If the edge doesn't exist, then we'll also check our zombie
		// index.
		if edgeIndex.Get(channelID[:]) == nil {
			exists = false
			zombieIndex := edges.NestedReadBucket(zombieBucket)
			if zombieIndex != nil {
				isZombie, _, _ = isZombieEdge(
					zombieIndex, chanID,
				)
			}

			return nil
		}

		exists = true
		isZombie = false

		// If the channel has been found in the graph, then retrieve
		// the edges itself so we can return the last updated
		// timestamps.
		nodes := tx.ReadBucket(nodeBucket)
		if nodes == nil {
			return ErrGraphNodeNotFound
		}

		e1, e2, err := fetchChanEdgePolicies(edgeIndex, edges, nodes,
			channelID[:], c.db)
		if err != nil {
			return err
		}

		// As we may have only one of the edges populated, only set the
		// update time if the edge was found in the database.
		if e1 != nil {
			upd1Time = e1.LastUpdate
		}
		if e2 != nil {
			upd2Time = e2.LastUpdate
		}

		return nil
	}, func() {}); err != nil {
		return time.Time{}, time.Time{}, exists, isZombie, err
	}

	// Populate the reject cache so the next lookup for this channel is
	// served from memory.
	c.rejectCache.insert(chanID, rejectCacheEntry{
		upd1Time: upd1Time.Unix(),
		upd2Time: upd2Time.Unix(),
		flags:    packRejectFlags(exists, isZombie),
	})

	return upd1Time, upd2Time, exists, isZombie, nil
}
  1215  
  1216  // UpdateChannelEdge retrieves and update edge of the graph database. Method
  1217  // only reserved for updating an edge info after its already been created.
  1218  // In order to maintain this constraints, we return an error in the scenario
  1219  // that an edge info hasn't yet been created yet, but someone attempts to update
  1220  // it.
  1221  func (c *ChannelGraph) UpdateChannelEdge(edge *ChannelEdgeInfo) error {
  1222  	// Construct the channel's primary key which is the 8-byte channel ID.
  1223  	var chanKey [8]byte
  1224  	binary.BigEndian.PutUint64(chanKey[:], edge.ChannelID)
  1225  
  1226  	return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
  1227  		edges := tx.ReadWriteBucket(edgeBucket)
  1228  		if edge == nil {
  1229  			return ErrEdgeNotFound
  1230  		}
  1231  
  1232  		edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
  1233  		if edgeIndex == nil {
  1234  			return ErrEdgeNotFound
  1235  		}
  1236  
  1237  		if edgeInfo := edgeIndex.Get(chanKey[:]); edgeInfo == nil {
  1238  			return ErrEdgeNotFound
  1239  		}
  1240  
  1241  		if c.graphCache != nil {
  1242  			c.graphCache.UpdateChannel(edge)
  1243  		}
  1244  
  1245  		return putChanEdgeInfo(edgeIndex, edge, chanKey)
  1246  	}, func() {})
  1247  }
  1248  
const (
	// pruneTipBytes is the total size of the value which stores a prune
	// entry of the graph in the prune log. The "prune tip" is the last
	// entry in the prune log, and indicates if the channel graph is in
	// sync with the current UTXO state. The structure of the value
	// is: blockHash, taking 32 bytes total. Entries in the prune log are
	// keyed by the 4-byte big-endian block height they were created at.
	pruneTipBytes = 32
)
  1257  
// PruneGraph prunes newly closed channels from the channel graph in response
// to a new block being solved on the network. Any transactions which spend the
// funding output of any known channels within the graph will be deleted.
// Additionally, the "prune tip", or the last block which has been used to
// prune the graph is stored so callers can ensure the graph is fully in sync
// with the current UTXO state. A slice of channels that have been closed by
// the target block are returned if the function succeeds without error.
func (c *ChannelGraph) PruneGraph(spentOutputs []*wire.OutPoint,
	blockHash *chainhash.Hash, blockHeight uint32) ([]*ChannelEdgeInfo, error) {

	// Hold the cache lock for the entire operation so the reject/chan
	// cache evictions below stay consistent with the DB update.
	c.cacheMu.Lock()
	defer c.cacheMu.Unlock()

	var chansClosed []*ChannelEdgeInfo

	err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
		// First grab the edges bucket which houses the information
		// we'd like to delete
		edges, err := tx.CreateTopLevelBucket(edgeBucket)
		if err != nil {
			return err
		}

		// Next grab the two edge indexes which will also need to be updated.
		edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket)
		if err != nil {
			return err
		}
		chanIndex, err := edges.CreateBucketIfNotExists(channelPointBucket)
		if err != nil {
			return err
		}
		nodes := tx.ReadWriteBucket(nodeBucket)
		if nodes == nil {
			return ErrSourceNodeNotSet
		}
		zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
		if err != nil {
			return err
		}

		// For each of the outpoints that have been spent within the
		// block, we attempt to delete them from the graph as if that
		// outpoint was a channel, then it has now been closed.
		for _, chanPoint := range spentOutputs {
			// TODO(roasbeef): load channel bloom filter, continue
			// if NOT if filter

			var opBytes bytes.Buffer
			if err := writeOutpoint(&opBytes, chanPoint); err != nil {
				return err
			}

			// First attempt to see if the channel exists within
			// the database, if not, then we can exit early.
			chanID := chanIndex.Get(opBytes.Bytes())
			if chanID == nil {
				continue
			}

			// However, if it does, then we'll read out the full
			// version so we can add it to the set of deleted
			// channels.
			edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID)
			if err != nil {
				return err
			}

			// Attempt to delete the channel, an ErrEdgeNotFound
			// will be returned if that outpoint isn't known to be
			// a channel. If no error is returned, then a channel
			// was successfully pruned.
			err = c.delChannelEdge(
				edges, edgeIndex, chanIndex, zombieIndex, nodes,
				chanID, false, false,
			)
			if err != nil && err != ErrEdgeNotFound {
				return err
			}

			chansClosed = append(chansClosed, &edgeInfo)
		}

		metaBucket, err := tx.CreateTopLevelBucket(graphMetaBucket)
		if err != nil {
			return err
		}

		pruneBucket, err := metaBucket.CreateBucketIfNotExists(pruneLogBucket)
		if err != nil {
			return err
		}

		// With the graph pruned, add a new entry to the prune log,
		// which can be used to check if the graph is fully synced with
		// the current UTXO state. The entry maps the big-endian block
		// height to the block hash (pruneTipBytes wide).
		var blockHeightBytes [4]byte
		byteOrder.PutUint32(blockHeightBytes[:], blockHeight)

		var newTip [pruneTipBytes]byte
		copy(newTip[:], blockHash[:])

		err = pruneBucket.Put(blockHeightBytes[:], newTip[:])
		if err != nil {
			return err
		}

		// Now that the graph has been pruned, we'll also attempt to
		// prune any nodes that have had a channel closed within the
		// latest block.
		return c.pruneGraphNodes(nodes, edgeIndex)
	}, func() {
		chansClosed = nil
	})
	if err != nil {
		return nil, err
	}

	// Evict every closed channel from the in-memory caches now that the
	// DB transaction has committed.
	for _, channel := range chansClosed {
		c.rejectCache.remove(channel.ChannelID)
		c.chanCache.remove(channel.ChannelID)
	}

	if c.graphCache != nil {
		log.Debugf("Pruned graph, cache now has %s",
			c.graphCache.Stats())
	}

	return chansClosed, nil
}
  1388  
  1389  // PruneGraphNodes is a garbage collection method which attempts to prune out
  1390  // any nodes from the channel graph that are currently unconnected. This ensure
  1391  // that we only maintain a graph of reachable nodes. In the event that a pruned
  1392  // node gains more channels, it will be re-added back to the graph.
  1393  func (c *ChannelGraph) PruneGraphNodes() error {
  1394  	return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
  1395  		nodes := tx.ReadWriteBucket(nodeBucket)
  1396  		if nodes == nil {
  1397  			return ErrGraphNodesNotFound
  1398  		}
  1399  		edges := tx.ReadWriteBucket(edgeBucket)
  1400  		if edges == nil {
  1401  			return ErrGraphNotFound
  1402  		}
  1403  		edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
  1404  		if edgeIndex == nil {
  1405  			return ErrGraphNoEdgesFound
  1406  		}
  1407  
  1408  		return c.pruneGraphNodes(nodes, edgeIndex)
  1409  	}, func() {})
  1410  }
  1411  
// pruneGraphNodes attempts to remove any nodes from the graph who have had a
// channel closed within the current block. If the node still has existing
// channels in the graph, this will act as a no-op.
func (c *ChannelGraph) pruneGraphNodes(nodes kvdb.RwBucket,
	edgeIndex kvdb.RwBucket) error {

	log.Trace("Pruning nodes from graph with no open channels")

	// We'll retrieve the graph's source node to ensure we don't remove it
	// even if it no longer has any open channels.
	sourceNode, err := c.sourceNode(nodes)
	if err != nil {
		return err
	}

	// We'll use this map to keep count the number of references to a node
	// in the graph. A node should only be removed once it has no more
	// references in the graph.
	nodeRefCounts := make(map[[33]byte]int)
	err = nodes.ForEach(func(pubKey, nodeBytes []byte) error {
		// If this is the source key, then we skip this
		// iteration as the value for this key is a pubKey
		// rather than raw node information. Keys that aren't
		// 33 bytes long are sub-bucket names, not node keys.
		if bytes.Equal(pubKey, sourceKey) || len(pubKey) != 33 {
			return nil
		}

		// Seed every known node with a zero ref count; edges seen
		// below will bump the counts.
		var nodePub [33]byte
		copy(nodePub[:], pubKey)
		nodeRefCounts[nodePub] = 0

		return nil
	})
	if err != nil {
		return err
	}

	// To ensure we never delete the source node, we'll start off by
	// bumping its ref count to 1.
	nodeRefCounts[sourceNode.PubKeyBytes] = 1

	// Next, we'll run through the edgeIndex which maps a channel ID to the
	// edge info. We'll use this scan to populate our reference count map
	// above.
	err = edgeIndex.ForEach(func(chanID, edgeInfoBytes []byte) error {
		// The first 66 bytes of the edge info contain the pubkeys of
		// the nodes that this edge attaches. We'll extract them, and
		// add them to the ref count map.
		var node1, node2 [33]byte
		copy(node1[:], edgeInfoBytes[:33])
		copy(node2[:], edgeInfoBytes[33:])

		// With the nodes extracted, we'll increase the ref count of
		// each of the nodes.
		nodeRefCounts[node1]++
		nodeRefCounts[node2]++

		return nil
	})
	if err != nil {
		return err
	}

	// Finally, we'll make a second pass over the set of nodes, and delete
	// any nodes that have a ref count of zero.
	var numNodesPruned int
	for nodePubKey, refCount := range nodeRefCounts {
		// If the ref count of the node isn't zero, then we can safely
		// skip it as it still has edges to or from it within the
		// graph.
		if refCount != 0 {
			continue
		}

		// Keep the in-memory cache consistent with the deletion below.
		if c.graphCache != nil {
			c.graphCache.RemoveNode(nodePubKey)
		}

		// If we reach this point, then there are no longer any edges
		// that connect this node, so we can delete it. A failure to
		// delete one node is logged and skipped rather than aborting
		// the whole prune pass.
		if err := c.deleteLightningNode(nodes, nodePubKey[:]); err != nil {
			log.Warnf("Unable to prune node %x from the "+
				"graph: %v", nodePubKey, err)
			continue
		}

		log.Infof("Pruned unconnected node %x from channel graph",
			nodePubKey[:])

		numNodesPruned++
	}

	if numNodesPruned > 0 {
		log.Infof("Pruned %v unconnected nodes from the channel graph",
			numNodesPruned)
	}

	return nil
}
  1511  
// DisconnectBlockAtHeight is used to indicate that the block specified
// by the passed height has been disconnected from the main chain. This
// will "rewind" the graph back to the height below, deleting channels
// that are no longer confirmed from the graph. The prune log will be
// set to the last prune height valid for the remaining chain.
// Channels that were removed from the graph resulting from the
// disconnected block are returned.
func (c *ChannelGraph) DisconnectBlockAtHeight(height uint32) ([]*ChannelEdgeInfo,
	error) {

	// Every channel having a ShortChannelID starting at 'height'
	// will no longer be confirmed.
	startShortChanID := lnwire.ShortChannelID{
		BlockHeight: height,
	}

	// Delete everything after this height from the db. The masks below
	// keep the fields within the 3-byte (block height, tx index) and
	// 2-byte (tx position) widths a short channel ID encodes.
	endShortChanID := lnwire.ShortChannelID{
		BlockHeight: math.MaxUint32 & 0x00ffffff,
		TxIndex:     math.MaxUint32 & 0x00ffffff,
		TxPosition:  math.MaxUint16,
	}
	// The block height will be the 3 first bytes of the channel IDs.
	var chanIDStart [8]byte
	byteOrder.PutUint64(chanIDStart[:], startShortChanID.ToUint64())
	var chanIDEnd [8]byte
	byteOrder.PutUint64(chanIDEnd[:], endShortChanID.ToUint64())

	// Hold the cache lock so the cache evictions at the end stay
	// consistent with the DB update.
	c.cacheMu.Lock()
	defer c.cacheMu.Unlock()

	// Keep track of the channels that are removed from the graph.
	var removedChans []*ChannelEdgeInfo

	if err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
		edges, err := tx.CreateTopLevelBucket(edgeBucket)
		if err != nil {
			return err
		}
		edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket)
		if err != nil {
			return err
		}
		chanIndex, err := edges.CreateBucketIfNotExists(channelPointBucket)
		if err != nil {
			return err
		}
		zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
		if err != nil {
			return err
		}
		nodes, err := tx.CreateTopLevelBucket(nodeBucket)
		if err != nil {
			return err
		}

		// Scan from chanIDStart to chanIDEnd, deleting every
		// found edge.
		// NOTE: we must delete the edges after the cursor loop, since
		// modifying the bucket while traversing is not safe.
		var keys [][]byte
		cursor := edgeIndex.ReadWriteCursor()
		for k, v := cursor.Seek(chanIDStart[:]); k != nil &&
			bytes.Compare(k, chanIDEnd[:]) <= 0; k, v = cursor.Next() {

			edgeInfoReader := bytes.NewReader(v)
			edgeInfo, err := deserializeChanEdgeInfo(edgeInfoReader)
			if err != nil {
				return err
			}

			keys = append(keys, k)
			removedChans = append(removedChans, &edgeInfo)
		}

		// Now that traversal is done, it's safe to delete the
		// collected edges.
		for _, k := range keys {
			err = c.delChannelEdge(
				edges, edgeIndex, chanIndex, zombieIndex, nodes,
				k, false, false,
			)
			if err != nil && err != ErrEdgeNotFound {
				return err
			}
		}

		// Delete all the entries in the prune log having a height
		// greater or equal to the block disconnected.
		metaBucket, err := tx.CreateTopLevelBucket(graphMetaBucket)
		if err != nil {
			return err
		}

		pruneBucket, err := metaBucket.CreateBucketIfNotExists(pruneLogBucket)
		if err != nil {
			return err
		}

		var pruneKeyStart [4]byte
		byteOrder.PutUint32(pruneKeyStart[:], height)

		var pruneKeyEnd [4]byte
		byteOrder.PutUint32(pruneKeyEnd[:], math.MaxUint32)

		// To avoid modifying the bucket while traversing, we delete
		// the keys in a second loop.
		var pruneKeys [][]byte
		pruneCursor := pruneBucket.ReadWriteCursor()
		for k, _ := pruneCursor.Seek(pruneKeyStart[:]); k != nil &&
			bytes.Compare(k, pruneKeyEnd[:]) <= 0; k, _ = pruneCursor.Next() {

			pruneKeys = append(pruneKeys, k)
		}

		for _, k := range pruneKeys {
			if err := pruneBucket.Delete(k); err != nil {
				return err
			}
		}

		return nil
	}, func() {
		removedChans = nil
	}); err != nil {
		return nil, err
	}

	// Evict the removed channels from the in-memory caches now that the
	// transaction has committed.
	for _, channel := range removedChans {
		c.rejectCache.remove(channel.ChannelID)
		c.chanCache.remove(channel.ChannelID)
	}

	return removedChans, nil
}
  1645  
  1646  // PruneTip returns the block height and hash of the latest block that has been
  1647  // used to prune channels in the graph. Knowing the "prune tip" allows callers
  1648  // to tell if the graph is currently in sync with the current best known UTXO
  1649  // state.
  1650  func (c *ChannelGraph) PruneTip() (*chainhash.Hash, uint32, error) {
  1651  	var (
  1652  		tipHash   chainhash.Hash
  1653  		tipHeight uint32
  1654  	)
  1655  
  1656  	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
  1657  		graphMeta := tx.ReadBucket(graphMetaBucket)
  1658  		if graphMeta == nil {
  1659  			return ErrGraphNotFound
  1660  		}
  1661  		pruneBucket := graphMeta.NestedReadBucket(pruneLogBucket)
  1662  		if pruneBucket == nil {
  1663  			return ErrGraphNeverPruned
  1664  		}
  1665  
  1666  		pruneCursor := pruneBucket.ReadCursor()
  1667  
  1668  		// The prune key with the largest block height will be our
  1669  		// prune tip.
  1670  		k, v := pruneCursor.Last()
  1671  		if k == nil {
  1672  			return ErrGraphNeverPruned
  1673  		}
  1674  
  1675  		// Once we have the prune tip, the value will be the block hash,
  1676  		// and the key the block height.
  1677  		copy(tipHash[:], v)
  1678  		tipHeight = byteOrder.Uint32(k)
  1679  
  1680  		return nil
  1681  	}, func() {})
  1682  	if err != nil {
  1683  		return nil, 0, err
  1684  	}
  1685  
  1686  	return &tipHash, tipHeight, nil
  1687  }
  1688  
  1689  // DeleteChannelEdges removes edges with the given channel IDs from the
  1690  // database and marks them as zombies. This ensures that we're unable to re-add
  1691  // it to our database once again. If an edge does not exist within the
  1692  // database, then ErrEdgeNotFound will be returned. If strictZombiePruning is
  1693  // true, then when we mark these edges as zombies, we'll set up the keys such
  1694  // that we require the node that failed to send the fresh update to be the one
  1695  // that resurrects the channel from its zombie state.
  1696  func (c *ChannelGraph) DeleteChannelEdges(strictZombiePruning bool,
  1697  	chanIDs ...uint64) error {
  1698  
  1699  	// TODO(roasbeef): possibly delete from node bucket if node has no more
  1700  	// channels
  1701  	// TODO(roasbeef): don't delete both edges?
  1702  
  1703  	c.cacheMu.Lock()
  1704  	defer c.cacheMu.Unlock()
  1705  
  1706  	err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
  1707  		edges := tx.ReadWriteBucket(edgeBucket)
  1708  		if edges == nil {
  1709  			return ErrEdgeNotFound
  1710  		}
  1711  		edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
  1712  		if edgeIndex == nil {
  1713  			return ErrEdgeNotFound
  1714  		}
  1715  		chanIndex := edges.NestedReadWriteBucket(channelPointBucket)
  1716  		if chanIndex == nil {
  1717  			return ErrEdgeNotFound
  1718  		}
  1719  		nodes := tx.ReadWriteBucket(nodeBucket)
  1720  		if nodes == nil {
  1721  			return ErrGraphNodeNotFound
  1722  		}
  1723  		zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
  1724  		if err != nil {
  1725  			return err
  1726  		}
  1727  
  1728  		var rawChanID [8]byte
  1729  		for _, chanID := range chanIDs {
  1730  			byteOrder.PutUint64(rawChanID[:], chanID)
  1731  			err := c.delChannelEdge(
  1732  				edges, edgeIndex, chanIndex, zombieIndex, nodes,
  1733  				rawChanID[:], true, strictZombiePruning,
  1734  			)
  1735  			if err != nil {
  1736  				return err
  1737  			}
  1738  		}
  1739  
  1740  		return nil
  1741  	}, func() {})
  1742  	if err != nil {
  1743  		return err
  1744  	}
  1745  
  1746  	for _, chanID := range chanIDs {
  1747  		c.rejectCache.remove(chanID)
  1748  		c.chanCache.remove(chanID)
  1749  	}
  1750  
  1751  	return nil
  1752  }
  1753  
  1754  // ChannelID attempt to lookup the 8-byte compact channel ID which maps to the
  1755  // passed channel point (outpoint). If the passed channel doesn't exist within
  1756  // the database, then ErrEdgeNotFound is returned.
  1757  func (c *ChannelGraph) ChannelID(chanPoint *wire.OutPoint) (uint64, error) {
  1758  	var chanID uint64
  1759  	if err := kvdb.View(c.db, func(tx kvdb.RTx) error {
  1760  		var err error
  1761  		chanID, err = getChanID(tx, chanPoint)
  1762  		return err
  1763  	}, func() {
  1764  		chanID = 0
  1765  	}); err != nil {
  1766  		return 0, err
  1767  	}
  1768  
  1769  	return chanID, nil
  1770  }
  1771  
  1772  // getChanID returns the assigned channel ID for a given channel point.
  1773  func getChanID(tx kvdb.RTx, chanPoint *wire.OutPoint) (uint64, error) {
  1774  	var b bytes.Buffer
  1775  	if err := writeOutpoint(&b, chanPoint); err != nil {
  1776  		return 0, err
  1777  	}
  1778  
  1779  	edges := tx.ReadBucket(edgeBucket)
  1780  	if edges == nil {
  1781  		return 0, ErrGraphNoEdgesFound
  1782  	}
  1783  	chanIndex := edges.NestedReadBucket(channelPointBucket)
  1784  	if chanIndex == nil {
  1785  		return 0, ErrGraphNoEdgesFound
  1786  	}
  1787  
  1788  	chanIDBytes := chanIndex.Get(b.Bytes())
  1789  	if chanIDBytes == nil {
  1790  		return 0, ErrEdgeNotFound
  1791  	}
  1792  
  1793  	chanID := byteOrder.Uint64(chanIDBytes)
  1794  
  1795  	return chanID, nil
  1796  }
  1797  
  1798  // TODO(roasbeef): allow updates to use Batch?
  1799  
  1800  // HighestChanID returns the "highest" known channel ID in the channel graph.
  1801  // This represents the "newest" channel from the PoV of the chain. This method
  1802  // can be used by peers to quickly determine if they're graphs are in sync.
  1803  func (c *ChannelGraph) HighestChanID() (uint64, error) {
  1804  	var cid uint64
  1805  
  1806  	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
  1807  		edges := tx.ReadBucket(edgeBucket)
  1808  		if edges == nil {
  1809  			return ErrGraphNoEdgesFound
  1810  		}
  1811  		edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
  1812  		if edgeIndex == nil {
  1813  			return ErrGraphNoEdgesFound
  1814  		}
  1815  
  1816  		// In order to find the highest chan ID, we'll fetch a cursor
  1817  		// and use that to seek to the "end" of our known rage.
  1818  		cidCursor := edgeIndex.ReadCursor()
  1819  
  1820  		lastChanID, _ := cidCursor.Last()
  1821  
  1822  		// If there's no key, then this means that we don't actually
  1823  		// know of any channels, so we'll return a predicable error.
  1824  		if lastChanID == nil {
  1825  			return ErrGraphNoEdgesFound
  1826  		}
  1827  
  1828  		// Otherwise, we'll de serialize the channel ID and return it
  1829  		// to the caller.
  1830  		cid = byteOrder.Uint64(lastChanID)
  1831  		return nil
  1832  	}, func() {
  1833  		cid = 0
  1834  	})
  1835  	if err != nil && err != ErrGraphNoEdgesFound {
  1836  		return 0, err
  1837  	}
  1838  
  1839  	return cid, nil
  1840  }
  1841  
// ChannelEdge represents the complete set of information for a channel edge in
// the known channel graph. This struct couples the core information of the
// edge as well as each of the known advertised edge policies.
type ChannelEdge struct {
	// Info contains all the static information describing the channel.
	Info *ChannelEdgeInfo

	// Policy1 points to the "first" edge policy of the channel containing
	// the dynamic information required to properly route through the edge.
	//
	// NOTE(review): may be nil when no update has been received for this
	// direction (see the nil handling in makeZombiePubkeys) — confirm
	// against fetchChanEdgePolicies.
	Policy1 *ChannelEdgePolicy

	// Policy2 points to the "second" edge policy of the channel containing
	// the dynamic information required to properly route through the edge.
	//
	// NOTE(review): may be nil, as with Policy1.
	Policy2 *ChannelEdgePolicy
}
  1857  
// ChanUpdatesInHorizon returns all the known channel edges which have at least
// one edge that has an update timestamp within the specified horizon.
//
// The [startTime, endTime] bounds are inclusive. Edges are served from the
// in-memory channel cache where possible; any edges read from disk are
// inserted into the cache before returning.
func (c *ChannelGraph) ChanUpdatesInHorizon(startTime,
	endTime time.Time) ([]ChannelEdge, error) {

	// To ensure we don't return duplicate ChannelEdges, we'll use an
	// additional map to keep track of the edges already seen to prevent
	// re-adding it.
	var edgesSeen map[uint64]struct{}
	var edgesToCache map[uint64]ChannelEdge
	var edgesInHorizon []ChannelEdge

	c.cacheMu.Lock()
	defer c.cacheMu.Unlock()

	// hits counts channel-cache hits so the hit ratio can be logged below.
	var hits int
	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrGraphNoEdgesFound
		}
		edgeUpdateIndex := edges.NestedReadBucket(edgeUpdateIndexBucket)
		if edgeUpdateIndex == nil {
			return ErrGraphNoEdgesFound
		}

		nodes := tx.ReadBucket(nodeBucket)
		if nodes == nil {
			return ErrGraphNodesNotFound
		}

		// We'll now obtain a cursor to perform a range query within
		// the index to find all channels within the horizon.
		updateCursor := edgeUpdateIndex.ReadCursor()

		// Index keys have the form updateTime || chanID, so encoding
		// the horizon bounds into the leading 8 bytes is sufficient
		// for the range scan.
		var startTimeBytes, endTimeBytes [8 + 8]byte
		byteOrder.PutUint64(
			startTimeBytes[:8], uint64(startTime.Unix()),
		)
		byteOrder.PutUint64(
			endTimeBytes[:8], uint64(endTime.Unix()),
		)

		// With our start and end times constructed, we'll step through
		// the index collecting the info and policy of each update of
		// each channel that has a last update within the time range.
		for indexKey, _ := updateCursor.Seek(startTimeBytes[:]); indexKey != nil &&
			bytes.Compare(indexKey, endTimeBytes[:]) <= 0; indexKey, _ = updateCursor.Next() {

			// We have a new eligible entry, so we'll slice off the
			// chan ID so we can query it in the DB.
			chanID := indexKey[8:]

			// If we've already retrieved the info and policies for
			// this edge, then we can skip it as we don't need to do
			// so again.
			chanIDInt := byteOrder.Uint64(chanID)
			if _, ok := edgesSeen[chanIDInt]; ok {
				continue
			}

			// Serve the edge straight from the cache when we can,
			// avoiding the disk reads below.
			if channel, ok := c.chanCache.get(chanIDInt); ok {
				hits++
				edgesSeen[chanIDInt] = struct{}{}
				edgesInHorizon = append(edgesInHorizon, channel)
				continue
			}

			// First, we'll fetch the static edge information.
			edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID)
			if err != nil {
				chanID := byteOrder.Uint64(chanID)
				return fmt.Errorf("unable to fetch info for "+
					"edge with chan_id=%v: %v", chanID, err)
			}
			edgeInfo.db = c.db

			// With the static information obtained, we'll now
			// fetch the dynamic policy info.
			edge1, edge2, err := fetchChanEdgePolicies(
				edgeIndex, edges, nodes, chanID, c.db,
			)
			if err != nil {
				chanID := byteOrder.Uint64(chanID)
				return fmt.Errorf("unable to fetch policies "+
					"for edge with chan_id=%v: %v", chanID,
					err)
			}

			// Finally, we'll collate this edge with the rest of
			// edges to be returned.
			edgesSeen[chanIDInt] = struct{}{}
			channel := ChannelEdge{
				Info:    &edgeInfo,
				Policy1: edge1,
				Policy2: edge2,
			}
			edgesInHorizon = append(edgesInHorizon, channel)
			edgesToCache[chanIDInt] = channel
		}

		return nil
	}, func() {
		edgesSeen = make(map[uint64]struct{})
		edgesToCache = make(map[uint64]ChannelEdge)
		edgesInHorizon = nil
	})
	// Missing buckets simply mean an empty result rather than a failure.
	switch {
	case err == ErrGraphNoEdgesFound:
		fallthrough
	case err == ErrGraphNodesNotFound:
		break

	case err != nil:
		return nil, err
	}

	// Insert any edges loaded from disk into the cache.
	for chanid, channel := range edgesToCache {
		c.chanCache.insert(chanid, channel)
	}

	log.Debugf("ChanUpdatesInHorizon hit percentage: %f (%d/%d)",
		float64(hits)/float64(len(edgesInHorizon)), hits,
		len(edgesInHorizon))

	return edgesInHorizon, nil
}
  1990  
  1991  // NodeUpdatesInHorizon returns all the known lightning node which have an
  1992  // update timestamp within the passed range. This method can be used by two
  1993  // nodes to quickly determine if they have the same set of up to date node
  1994  // announcements.
  1995  func (c *ChannelGraph) NodeUpdatesInHorizon(startTime,
  1996  	endTime time.Time) ([]LightningNode, error) {
  1997  
  1998  	var nodesInHorizon []LightningNode
  1999  
  2000  	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
  2001  		nodes := tx.ReadBucket(nodeBucket)
  2002  		if nodes == nil {
  2003  			return ErrGraphNodesNotFound
  2004  		}
  2005  
  2006  		nodeUpdateIndex := nodes.NestedReadBucket(nodeUpdateIndexBucket)
  2007  		if nodeUpdateIndex == nil {
  2008  			return ErrGraphNodesNotFound
  2009  		}
  2010  
  2011  		// We'll now obtain a cursor to perform a range query within
  2012  		// the index to find all node announcements within the horizon.
  2013  		updateCursor := nodeUpdateIndex.ReadCursor()
  2014  
  2015  		var startTimeBytes, endTimeBytes [8 + 33]byte
  2016  		byteOrder.PutUint64(
  2017  			startTimeBytes[:8], uint64(startTime.Unix()),
  2018  		)
  2019  		byteOrder.PutUint64(
  2020  			endTimeBytes[:8], uint64(endTime.Unix()),
  2021  		)
  2022  
  2023  		// With our start and end times constructed, we'll step through
  2024  		// the index collecting info for each node within the time
  2025  		// range.
  2026  		for indexKey, _ := updateCursor.Seek(startTimeBytes[:]); indexKey != nil &&
  2027  			bytes.Compare(indexKey, endTimeBytes[:]) <= 0; indexKey, _ = updateCursor.Next() {
  2028  
  2029  			nodePub := indexKey[8:]
  2030  			node, err := fetchLightningNode(nodes, nodePub)
  2031  			if err != nil {
  2032  				return err
  2033  			}
  2034  			node.db = c.db
  2035  
  2036  			nodesInHorizon = append(nodesInHorizon, node)
  2037  		}
  2038  
  2039  		return nil
  2040  	}, func() {
  2041  		nodesInHorizon = nil
  2042  	})
  2043  	switch {
  2044  	case err == ErrGraphNoEdgesFound:
  2045  		fallthrough
  2046  	case err == ErrGraphNodesNotFound:
  2047  		break
  2048  
  2049  	case err != nil:
  2050  		return nil, err
  2051  	}
  2052  
  2053  	return nodesInHorizon, nil
  2054  }
  2055  
  2056  // FilterKnownChanIDs takes a set of channel IDs and return the subset of chan
  2057  // ID's that we don't know and are not known zombies of the passed set. In other
  2058  // words, we perform a set difference of our set of chan ID's and the ones
  2059  // passed in. This method can be used by callers to determine the set of
  2060  // channels another peer knows of that we don't.
  2061  func (c *ChannelGraph) FilterKnownChanIDs(chanIDs []uint64) ([]uint64, error) {
  2062  	var newChanIDs []uint64
  2063  
  2064  	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
  2065  		edges := tx.ReadBucket(edgeBucket)
  2066  		if edges == nil {
  2067  			return ErrGraphNoEdgesFound
  2068  		}
  2069  		edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
  2070  		if edgeIndex == nil {
  2071  			return ErrGraphNoEdgesFound
  2072  		}
  2073  
  2074  		// Fetch the zombie index, it may not exist if no edges have
  2075  		// ever been marked as zombies. If the index has been
  2076  		// initialized, we will use it later to skip known zombie edges.
  2077  		zombieIndex := edges.NestedReadBucket(zombieBucket)
  2078  
  2079  		// We'll run through the set of chanIDs and collate only the
  2080  		// set of channel that are unable to be found within our db.
  2081  		var cidBytes [8]byte
  2082  		for _, cid := range chanIDs {
  2083  			byteOrder.PutUint64(cidBytes[:], cid)
  2084  
  2085  			// If the edge is already known, skip it.
  2086  			if v := edgeIndex.Get(cidBytes[:]); v != nil {
  2087  				continue
  2088  			}
  2089  
  2090  			// If the edge is a known zombie, skip it.
  2091  			if zombieIndex != nil {
  2092  				isZombie, _, _ := isZombieEdge(zombieIndex, cid)
  2093  				if isZombie {
  2094  					continue
  2095  				}
  2096  			}
  2097  
  2098  			newChanIDs = append(newChanIDs, cid)
  2099  		}
  2100  
  2101  		return nil
  2102  	}, func() {
  2103  		newChanIDs = nil
  2104  	})
  2105  	switch {
  2106  	// If we don't know of any edges yet, then we'll return the entire set
  2107  	// of chan IDs specified.
  2108  	case err == ErrGraphNoEdgesFound:
  2109  		return chanIDs, nil
  2110  
  2111  	case err != nil:
  2112  		return nil, err
  2113  	}
  2114  
  2115  	return newChanIDs, nil
  2116  }
  2117  
// BlockChannelRange represents a range of channels for a given block height.
// It is the per-block grouping unit returned by FilterChannelRange.
type BlockChannelRange struct {
	// Height is the height of the block all of the channels below were
	// included in.
	Height uint32

	// Channels is the list of channels identified by their short ID
	// representation known to us that were included in the block height
	// above.
	Channels []lnwire.ShortChannelID
}
  2129  
  2130  // FilterChannelRange returns the channel ID's of all known channels which were
  2131  // mined in a block height within the passed range. The channel IDs are grouped
  2132  // by their common block height. This method can be used to quickly share with a
  2133  // peer the set of channels we know of within a particular range to catch them
  2134  // up after a period of time offline.
  2135  func (c *ChannelGraph) FilterChannelRange(startHeight,
  2136  	endHeight uint32) ([]BlockChannelRange, error) {
  2137  
  2138  	startChanID := &lnwire.ShortChannelID{
  2139  		BlockHeight: startHeight,
  2140  	}
  2141  
  2142  	endChanID := lnwire.ShortChannelID{
  2143  		BlockHeight: endHeight,
  2144  		TxIndex:     math.MaxUint32 & 0x00ffffff,
  2145  		TxPosition:  math.MaxUint16,
  2146  	}
  2147  
  2148  	// As we need to perform a range scan, we'll convert the starting and
  2149  	// ending height to their corresponding values when encoded using short
  2150  	// channel ID's.
  2151  	var chanIDStart, chanIDEnd [8]byte
  2152  	byteOrder.PutUint64(chanIDStart[:], startChanID.ToUint64())
  2153  	byteOrder.PutUint64(chanIDEnd[:], endChanID.ToUint64())
  2154  
  2155  	var channelsPerBlock map[uint32][]lnwire.ShortChannelID
  2156  	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
  2157  		edges := tx.ReadBucket(edgeBucket)
  2158  		if edges == nil {
  2159  			return ErrGraphNoEdgesFound
  2160  		}
  2161  		edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
  2162  		if edgeIndex == nil {
  2163  			return ErrGraphNoEdgesFound
  2164  		}
  2165  
  2166  		cursor := edgeIndex.ReadCursor()
  2167  
  2168  		// We'll now iterate through the database, and find each
  2169  		// channel ID that resides within the specified range.
  2170  		for k, _ := cursor.Seek(chanIDStart[:]); k != nil &&
  2171  			bytes.Compare(k, chanIDEnd[:]) <= 0; k, _ = cursor.Next() {
  2172  
  2173  			// This channel ID rests within the target range, so
  2174  			// we'll add it to our returned set.
  2175  			rawCid := byteOrder.Uint64(k)
  2176  			cid := lnwire.NewShortChanIDFromInt(rawCid)
  2177  			channelsPerBlock[cid.BlockHeight] = append(
  2178  				channelsPerBlock[cid.BlockHeight], cid,
  2179  			)
  2180  		}
  2181  
  2182  		return nil
  2183  	}, func() {
  2184  		channelsPerBlock = make(map[uint32][]lnwire.ShortChannelID)
  2185  	})
  2186  
  2187  	switch {
  2188  	// If we don't know of any channels yet, then there's nothing to
  2189  	// filter, so we'll return an empty slice.
  2190  	case err == ErrGraphNoEdgesFound || len(channelsPerBlock) == 0:
  2191  		return nil, nil
  2192  
  2193  	case err != nil:
  2194  		return nil, err
  2195  	}
  2196  
  2197  	// Return the channel ranges in ascending block height order.
  2198  	blocks := make([]uint32, 0, len(channelsPerBlock))
  2199  	for block := range channelsPerBlock {
  2200  		blocks = append(blocks, block)
  2201  	}
  2202  	sort.Slice(blocks, func(i, j int) bool {
  2203  		return blocks[i] < blocks[j]
  2204  	})
  2205  
  2206  	channelRanges := make([]BlockChannelRange, 0, len(channelsPerBlock))
  2207  	for _, block := range blocks {
  2208  		channelRanges = append(channelRanges, BlockChannelRange{
  2209  			Height:   block,
  2210  			Channels: channelsPerBlock[block],
  2211  		})
  2212  	}
  2213  
  2214  	return channelRanges, nil
  2215  }
  2216  
  2217  // FetchChanInfos returns the set of channel edges that correspond to the passed
  2218  // channel ID's. If an edge is the query is unknown to the database, it will
  2219  // skipped and the result will contain only those edges that exist at the time
  2220  // of the query. This can be used to respond to peer queries that are seeking to
  2221  // fill in gaps in their view of the channel graph.
  2222  func (c *ChannelGraph) FetchChanInfos(chanIDs []uint64) ([]ChannelEdge, error) {
  2223  	// TODO(roasbeef): sort cids?
  2224  
  2225  	var (
  2226  		chanEdges []ChannelEdge
  2227  		cidBytes  [8]byte
  2228  	)
  2229  
  2230  	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
  2231  		edges := tx.ReadBucket(edgeBucket)
  2232  		if edges == nil {
  2233  			return ErrGraphNoEdgesFound
  2234  		}
  2235  		edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
  2236  		if edgeIndex == nil {
  2237  			return ErrGraphNoEdgesFound
  2238  		}
  2239  		nodes := tx.ReadBucket(nodeBucket)
  2240  		if nodes == nil {
  2241  			return ErrGraphNotFound
  2242  		}
  2243  
  2244  		for _, cid := range chanIDs {
  2245  			byteOrder.PutUint64(cidBytes[:], cid)
  2246  
  2247  			// First, we'll fetch the static edge information. If
  2248  			// the edge is unknown, we will skip the edge and
  2249  			// continue gathering all known edges.
  2250  			edgeInfo, err := fetchChanEdgeInfo(
  2251  				edgeIndex, cidBytes[:],
  2252  			)
  2253  			switch {
  2254  			case err == ErrEdgeNotFound:
  2255  				continue
  2256  			case err != nil:
  2257  				return err
  2258  			}
  2259  			edgeInfo.db = c.db
  2260  
  2261  			// With the static information obtained, we'll now
  2262  			// fetch the dynamic policy info.
  2263  			edge1, edge2, err := fetchChanEdgePolicies(
  2264  				edgeIndex, edges, nodes, cidBytes[:], c.db,
  2265  			)
  2266  			if err != nil {
  2267  				return err
  2268  			}
  2269  
  2270  			chanEdges = append(chanEdges, ChannelEdge{
  2271  				Info:    &edgeInfo,
  2272  				Policy1: edge1,
  2273  				Policy2: edge2,
  2274  			})
  2275  		}
  2276  		return nil
  2277  	}, func() {
  2278  		chanEdges = nil
  2279  	})
  2280  	if err != nil {
  2281  		return nil, err
  2282  	}
  2283  
  2284  	return chanEdges, nil
  2285  }
  2286  
  2287  func delEdgeUpdateIndexEntry(edgesBucket kvdb.RwBucket, chanID uint64,
  2288  	edge1, edge2 *ChannelEdgePolicy) error {
  2289  
  2290  	// First, we'll fetch the edge update index bucket which currently
  2291  	// stores an entry for the channel we're about to delete.
  2292  	updateIndex := edgesBucket.NestedReadWriteBucket(edgeUpdateIndexBucket)
  2293  	if updateIndex == nil {
  2294  		// No edges in bucket, return early.
  2295  		return nil
  2296  	}
  2297  
  2298  	// Now that we have the bucket, we'll attempt to construct a template
  2299  	// for the index key: updateTime || chanid.
  2300  	var indexKey [8 + 8]byte
  2301  	byteOrder.PutUint64(indexKey[8:], chanID)
  2302  
  2303  	// With the template constructed, we'll attempt to delete an entry that
  2304  	// would have been created by both edges: we'll alternate the update
  2305  	// times, as one may had overridden the other.
  2306  	if edge1 != nil {
  2307  		byteOrder.PutUint64(indexKey[:8], uint64(edge1.LastUpdate.Unix()))
  2308  		if err := updateIndex.Delete(indexKey[:]); err != nil {
  2309  			return err
  2310  		}
  2311  	}
  2312  
  2313  	// We'll also attempt to delete the entry that may have been created by
  2314  	// the second edge.
  2315  	if edge2 != nil {
  2316  		byteOrder.PutUint64(indexKey[:8], uint64(edge2.LastUpdate.Unix()))
  2317  		if err := updateIndex.Delete(indexKey[:]); err != nil {
  2318  			return err
  2319  		}
  2320  	}
  2321  
  2322  	return nil
  2323  }
  2324  
// delChannelEdge removes a single channel (identified by its raw 8-byte
// channel ID) and both of its directed edges from the graph, cleaning up
// every index that references it: the edge update index, the disabled-policy
// index, the edge index and the channel point index. If isZombie is true, the
// channel is additionally recorded in the zombie index so it can't simply be
// re-added; with strictZombie also set, the zombie entry is keyed such that
// only the lagging node's fresh update can resurrect the channel (see
// makeZombiePubkeys).
func (c *ChannelGraph) delChannelEdge(edges, edgeIndex, chanIndex, zombieIndex,
	nodes kvdb.RwBucket, chanID []byte, isZombie, strictZombie bool) error {

	edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID)
	if err != nil {
		return err
	}

	// Keep the in-memory graph cache consistent with the on-disk state.
	if c.graphCache != nil {
		c.graphCache.RemoveChannel(
			edgeInfo.NodeKey1Bytes, edgeInfo.NodeKey2Bytes,
			edgeInfo.ChannelID,
		)
	}

	// We'll also remove the entry in the edge update index bucket before
	// we delete the edges themselves so we can access their last update
	// times.
	cid := byteOrder.Uint64(chanID)
	edge1, edge2, err := fetchChanEdgePolicies(
		edgeIndex, edges, nodes, chanID, nil,
	)
	if err != nil {
		return err
	}
	err = delEdgeUpdateIndexEntry(edges, cid, edge1, edge2)
	if err != nil {
		return err
	}

	// The edge key is of the format pubKey || chanID. First we construct
	// the latter half, populating the channel ID.
	var edgeKey [33 + 8]byte
	copy(edgeKey[33:], chanID)

	// With the latter half constructed, copy over the first public key to
	// delete the edge in this direction, then the second to delete the
	// edge in the opposite direction.
	copy(edgeKey[:33], edgeInfo.NodeKey1Bytes[:])
	if edges.Get(edgeKey[:]) != nil {
		if err := edges.Delete(edgeKey[:]); err != nil {
			return err
		}
	}
	copy(edgeKey[:33], edgeInfo.NodeKey2Bytes[:])
	if edges.Get(edgeKey[:]) != nil {
		if err := edges.Delete(edgeKey[:]); err != nil {
			return err
		}
	}

	// As part of deleting the edge we also remove all disabled entries
	// from the edgePolicyDisabledIndex bucket. We do that for both directions.
	//
	// NOTE(review): errors from updateEdgePolicyDisabledIndex are silently
	// discarded here — confirm this is intentional best-effort cleanup.
	updateEdgePolicyDisabledIndex(edges, cid, false, false)
	updateEdgePolicyDisabledIndex(edges, cid, true, false)

	// With the edge data deleted, we can purge the information from the two
	// edge indexes.
	if err := edgeIndex.Delete(chanID); err != nil {
		return err
	}
	var b bytes.Buffer
	if err := writeOutpoint(&b, &edgeInfo.ChannelPoint); err != nil {
		return err
	}
	if err := chanIndex.Delete(b.Bytes()); err != nil {
		return err
	}

	// Finally, we'll mark the edge as a zombie within our index if it's
	// being removed due to the channel becoming a zombie. We do this to
	// ensure we don't store unnecessary data for spent channels.
	if !isZombie {
		return nil
	}

	// In strict mode, derive the resurrection keys from the freshness of
	// each side's policy; otherwise either node may resurrect the channel.
	nodeKey1, nodeKey2 := edgeInfo.NodeKey1Bytes, edgeInfo.NodeKey2Bytes
	if strictZombie {
		nodeKey1, nodeKey2 = makeZombiePubkeys(&edgeInfo, edge1, edge2)
	}

	return markEdgeZombie(
		zombieIndex, byteOrder.Uint64(chanID), nodeKey1, nodeKey2,
	)
}
  2410  
  2411  // makeZombiePubkeys derives the node pubkeys to store in the zombie index for a
  2412  // particular pair of channel policies. The return values are one of:
  2413  //  1. (pubkey1, pubkey2)
  2414  //  2. (pubkey1, blank)
  2415  //  3. (blank, pubkey2)
  2416  //
  2417  // A blank pubkey means that corresponding node will be unable to resurrect a
  2418  // channel on its own. For example, node1 may continue to publish recent
  2419  // updates, but node2 has fallen way behind. After marking an edge as a zombie,
  2420  // we don't want another fresh update from node1 to resurrect, as the edge can
  2421  // only become live once node2 finally sends something recent.
  2422  //
  2423  // In the case where we have neither update, we allow either party to resurrect
  2424  // the channel. If the channel were to be marked zombie again, it would be
  2425  // marked with the correct lagging channel since we received an update from only
  2426  // one side.
  2427  func makeZombiePubkeys(info *ChannelEdgeInfo,
  2428  	e1, e2 *ChannelEdgePolicy) ([33]byte, [33]byte) {
  2429  
  2430  	switch {
  2431  
  2432  	// If we don't have either edge policy, we'll return both pubkeys so
  2433  	// that the channel can be resurrected by either party.
  2434  	case e1 == nil && e2 == nil:
  2435  		return info.NodeKey1Bytes, info.NodeKey2Bytes
  2436  
  2437  	// If we're missing edge1, or if both edges are present but edge1 is
  2438  	// older, we'll return edge1's pubkey and a blank pubkey for edge2. This
  2439  	// means that only an update from edge1 will be able to resurrect the
  2440  	// channel.
  2441  	case e1 == nil || (e2 != nil && e1.LastUpdate.Before(e2.LastUpdate)):
  2442  		return info.NodeKey1Bytes, [33]byte{}
  2443  
  2444  	// Otherwise, we're missing edge2 or edge2 is the older side, so we
  2445  	// return a blank pubkey for edge1. In this case, only an update from
  2446  	// edge2 can resurect the channel.
  2447  	default:
  2448  		return [33]byte{}, info.NodeKey2Bytes
  2449  	}
  2450  }
  2451  
  2452  // UpdateEdgePolicy updates the edge routing policy for a single directed edge
  2453  // within the database for the referenced channel. The `flags` attribute within
  2454  // the ChannelEdgePolicy determines which of the directed edges are being
  2455  // updated. If the flag is 1, then the first node's information is being
  2456  // updated, otherwise it's the second node's information. The node ordering is
  2457  // determined by the lexicographical ordering of the identity public keys of the
  2458  // nodes on either side of the channel.
  2459  func (c *ChannelGraph) UpdateEdgePolicy(edge *ChannelEdgePolicy,
  2460  	op ...batch.SchedulerOption) error {
  2461  
  2462  	var (
  2463  		isUpdate1    bool
  2464  		edgeNotFound bool
  2465  	)
  2466  
  2467  	r := &batch.Request{
  2468  		Reset: func() {
  2469  			isUpdate1 = false
  2470  			edgeNotFound = false
  2471  		},
  2472  		Update: func(tx kvdb.RwTx) error {
  2473  			var err error
  2474  			isUpdate1, err = updateEdgePolicy(
  2475  				tx, edge, c.graphCache,
  2476  			)
  2477  
  2478  			// Silence ErrEdgeNotFound so that the batch can
  2479  			// succeed, but propagate the error via local state.
  2480  			if err == ErrEdgeNotFound {
  2481  				edgeNotFound = true
  2482  				return nil
  2483  			}
  2484  
  2485  			return err
  2486  		},
  2487  		OnCommit: func(err error) error {
  2488  			switch {
  2489  			case err != nil:
  2490  				return err
  2491  			case edgeNotFound:
  2492  				return ErrEdgeNotFound
  2493  			default:
  2494  				c.updateEdgeCache(edge, isUpdate1)
  2495  				return nil
  2496  			}
  2497  		},
  2498  	}
  2499  
  2500  	for _, f := range op {
  2501  		f(r)
  2502  	}
  2503  
  2504  	return c.chanScheduler.Execute(r)
  2505  }
  2506  
  2507  func (c *ChannelGraph) updateEdgeCache(e *ChannelEdgePolicy, isUpdate1 bool) {
  2508  	// If an entry for this channel is found in reject cache, we'll modify
  2509  	// the entry with the updated timestamp for the direction that was just
  2510  	// written. If the edge doesn't exist, we'll load the cache entry lazily
  2511  	// during the next query for this edge.
  2512  	if entry, ok := c.rejectCache.get(e.ChannelID); ok {
  2513  		if isUpdate1 {
  2514  			entry.upd1Time = e.LastUpdate.Unix()
  2515  		} else {
  2516  			entry.upd2Time = e.LastUpdate.Unix()
  2517  		}
  2518  		c.rejectCache.insert(e.ChannelID, entry)
  2519  	}
  2520  
  2521  	// If an entry for this channel is found in channel cache, we'll modify
  2522  	// the entry with the updated policy for the direction that was just
  2523  	// written. If the edge doesn't exist, we'll defer loading the info and
  2524  	// policies and lazily read from disk during the next query.
  2525  	if channel, ok := c.chanCache.get(e.ChannelID); ok {
  2526  		if isUpdate1 {
  2527  			channel.Policy1 = e
  2528  		} else {
  2529  			channel.Policy2 = e
  2530  		}
  2531  		c.chanCache.insert(e.ChannelID, channel)
  2532  	}
  2533  }
  2534  
// updateEdgePolicy attempts to update an edge's policy within the relevant
// buckets using an existing database transaction. The returned boolean will be
// true if the updated policy belongs to node1, and false if the policy belonged
// to node2. If the optional graphCache is non-nil, it is updated in lockstep
// with the on-disk state.
func updateEdgePolicy(tx kvdb.RwTx, edge *ChannelEdgePolicy,
	graphCache *GraphCache) (bool, error) {

	// Both the edge bucket and the edge index must already exist for the
	// edge to be updatable; a missing bucket means the edge is unknown.
	edges := tx.ReadWriteBucket(edgeBucket)
	if edges == nil {
		return false, ErrEdgeNotFound

	}
	edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
	if edgeIndex == nil {
		return false, ErrEdgeNotFound
	}
	// The node bucket may be created on demand since putChanEdgePolicy
	// also maintains per-node update indexes within it.
	nodes, err := tx.CreateTopLevelBucket(nodeBucket)
	if err != nil {
		return false, err
	}

	// Create the channelID key by converting the channel ID
	// integer into a byte slice.
	var chanID [8]byte
	byteOrder.PutUint64(chanID[:], edge.ChannelID)

	// With the channel ID, we then fetch the value storing the two
	// nodes which connect this channel edge.
	nodeInfo := edgeIndex.Get(chanID[:])
	if nodeInfo == nil {
		return false, ErrEdgeNotFound
	}

	// Depending on the flags value passed above, either the first
	// or second edge policy is being updated. The edge index value
	// stores node1's key in bytes [0:33] and node2's in [33:66].
	var fromNode, toNode []byte
	var isUpdate1 bool
	if edge.ChannelFlags&lnwire.ChanUpdateDirection == 0 {
		fromNode = nodeInfo[:33]
		toNode = nodeInfo[33:66]
		isUpdate1 = true
	} else {
		fromNode = nodeInfo[33:66]
		toNode = nodeInfo[:33]
		isUpdate1 = false
	}

	// Finally, with the direction of the edge being updated
	// identified, we update the on-disk edge representation.
	err = putChanEdgePolicy(edges, nodes, edge, fromNode, toNode)
	if err != nil {
		return false, err
	}

	var (
		fromNodePubKey route.Vertex
		toNodePubKey   route.Vertex
	)
	copy(fromNodePubKey[:], fromNode)
	copy(toNodePubKey[:], toNode)

	// Mirror the change into the in-memory graph cache, if one is in use.
	if graphCache != nil {
		graphCache.UpdatePolicy(
			edge, fromNodePubKey, toNodePubKey, isUpdate1,
		)
	}

	return isUpdate1, nil
}
  2604  
// LightningNode represents an individual vertex/node within the channel graph.
// A node is connected to other nodes by one or more channel edges emanating
// from it. As the graph is directed, a node will also have an incoming edge
// attached to it for each outgoing edge.
type LightningNode struct {
	// PubKeyBytes is the raw bytes of the public key of the target node.
	PubKeyBytes [33]byte

	// pubKey is a cached parsed copy of PubKeyBytes, populated lazily by
	// the PubKey method.
	pubKey *secp256k1.PublicKey

	// HaveNodeAnnouncement indicates whether we received a node
	// announcement for this particular node. If true, the remaining fields
	// will be set, if false only the PubKey is known for this node.
	HaveNodeAnnouncement bool

	// LastUpdate is the last time the vertex information for this node has
	// been updated.
	LastUpdate time.Time

	// Addresses is the set of network addresses this node is reachable
	// over.
	Addresses []net.Addr

	// Color is the selected color for the node.
	Color color.RGBA

	// Alias is a nick-name for the node. The alias can be used to confirm
	// a node's identity or to serve as a short ID for an address book.
	Alias string

	// AuthSigBytes is the raw signature under the advertised public key
	// which serves to authenticate the attributes announced by this node.
	AuthSigBytes []byte

	// Features is the list of protocol features supported by this node.
	Features *lnwire.FeatureVector

	// ExtraOpaqueData is the set of data that was appended to this
	// message, some of which we may not actually know how to iterate or
	// parse. By holding onto this data, we ensure that we're able to
	// properly validate the set of signatures that cover these new fields,
	// and ensure we're able to make upgrades to the network in a forwards
	// compatible manner.
	ExtraOpaqueData []byte

	// db is the backing database handle used by methods that need to read
	// related graph state (e.g. ForEachChannel).
	db kvdb.Backend

	// TODO(roasbeef): discovery will need storage to keep it's last IP
	// address and re-announce if interface changes?

	// TODO(roasbeef): add update method and fetch?
}
  2655  
  2656  // PubKey is the node's long-term identity public key. This key will be used to
  2657  // authenticated any advertisements/updates sent by the node.
  2658  //
  2659  // NOTE: By having this method to access an attribute, we ensure we only need
  2660  // to fully deserialize the pubkey if absolutely necessary.
  2661  func (l *LightningNode) PubKey() (*secp256k1.PublicKey, error) {
  2662  	if l.pubKey != nil {
  2663  		return l.pubKey, nil
  2664  	}
  2665  
  2666  	key, err := secp256k1.ParsePubKey(l.PubKeyBytes[:])
  2667  	if err != nil {
  2668  		return nil, err
  2669  	}
  2670  	l.pubKey = key
  2671  
  2672  	return key, nil
  2673  }
  2674  
  2675  // AuthSig is a signature under the advertised public key which serves to
  2676  // authenticate the attributes announced by this node.
  2677  //
  2678  // NOTE: By having this method to access an attribute, we ensure we only need
  2679  // to fully deserialize the signature if absolutely necessary.
  2680  func (l *LightningNode) AuthSig() (*ecdsa.Signature, error) {
  2681  	return ecdsa.ParseDERSignature(l.AuthSigBytes)
  2682  }
  2683  
  2684  // AddPubKey is a setter-link method that can be used to swap out the public
  2685  // key for a node.
  2686  func (l *LightningNode) AddPubKey(key *secp256k1.PublicKey) {
  2687  	l.pubKey = key
  2688  	copy(l.PubKeyBytes[:], key.SerializeCompressed())
  2689  }
  2690  
  2691  // NodeAnnouncement retrieves the latest node announcement of the node.
  2692  func (l *LightningNode) NodeAnnouncement(signed bool) (*lnwire.NodeAnnouncement,
  2693  	error) {
  2694  
  2695  	if !l.HaveNodeAnnouncement {
  2696  		return nil, fmt.Errorf("node does not have node announcement")
  2697  	}
  2698  
  2699  	alias, err := lnwire.NewNodeAlias(l.Alias)
  2700  	if err != nil {
  2701  		return nil, err
  2702  	}
  2703  
  2704  	nodeAnn := &lnwire.NodeAnnouncement{
  2705  		Features:        l.Features.RawFeatureVector,
  2706  		NodeID:          l.PubKeyBytes,
  2707  		RGBColor:        l.Color,
  2708  		Alias:           alias,
  2709  		Addresses:       l.Addresses,
  2710  		Timestamp:       uint32(l.LastUpdate.Unix()),
  2711  		ExtraOpaqueData: l.ExtraOpaqueData,
  2712  	}
  2713  
  2714  	if !signed {
  2715  		return nodeAnn, nil
  2716  	}
  2717  
  2718  	sig, err := lnwire.NewSigFromRawSignature(l.AuthSigBytes)
  2719  	if err != nil {
  2720  		return nil, err
  2721  	}
  2722  
  2723  	nodeAnn.Signature = sig
  2724  
  2725  	return nodeAnn, nil
  2726  }
  2727  
  2728  // isPublic determines whether the node is seen as public within the graph from
  2729  // the source node's point of view. An existing database transaction can also be
  2730  // specified.
  2731  func (l *LightningNode) isPublic(tx kvdb.RTx, sourcePubKey []byte) (bool, error) {
  2732  	// In order to determine whether this node is publicly advertised within
  2733  	// the graph, we'll need to look at all of its edges and check whether
  2734  	// they extend to any other node than the source node. errDone will be
  2735  	// used to terminate the check early.
  2736  	nodeIsPublic := false
  2737  	errDone := errors.New("done")
  2738  	err := l.ForEachChannel(tx, func(_ kvdb.RTx, info *ChannelEdgeInfo,
  2739  		_, _ *ChannelEdgePolicy) error {
  2740  
  2741  		// If this edge doesn't extend to the source node, we'll
  2742  		// terminate our search as we can now conclude that the node is
  2743  		// publicly advertised within the graph due to the local node
  2744  		// knowing of the current edge.
  2745  		if !bytes.Equal(info.NodeKey1Bytes[:], sourcePubKey) &&
  2746  			!bytes.Equal(info.NodeKey2Bytes[:], sourcePubKey) {
  2747  
  2748  			nodeIsPublic = true
  2749  			return errDone
  2750  		}
  2751  
  2752  		// Since the edge _does_ extend to the source node, we'll also
  2753  		// need to ensure that this is a public edge.
  2754  		if info.AuthProof != nil {
  2755  			nodeIsPublic = true
  2756  			return errDone
  2757  		}
  2758  
  2759  		// Otherwise, we'll continue our search.
  2760  		return nil
  2761  	})
  2762  	if err != nil && err != errDone {
  2763  		return false, err
  2764  	}
  2765  
  2766  	return nodeIsPublic, nil
  2767  }
  2768  
  2769  // FetchLightningNode attempts to look up a target node by its identity public
  2770  // key. If the node isn't found in the database, then ErrGraphNodeNotFound is
  2771  // returned.
  2772  func (c *ChannelGraph) FetchLightningNode(nodePub route.Vertex) (
  2773  	*LightningNode, error) {
  2774  
  2775  	var node *LightningNode
  2776  	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
  2777  		// First grab the nodes bucket which stores the mapping from
  2778  		// pubKey to node information.
  2779  		nodes := tx.ReadBucket(nodeBucket)
  2780  		if nodes == nil {
  2781  			return ErrGraphNotFound
  2782  		}
  2783  
  2784  		// If a key for this serialized public key isn't found, then
  2785  		// the target node doesn't exist within the database.
  2786  		nodeBytes := nodes.Get(nodePub[:])
  2787  		if nodeBytes == nil {
  2788  			return ErrGraphNodeNotFound
  2789  		}
  2790  
  2791  		// If the node is found, then we can de deserialize the node
  2792  		// information to return to the user.
  2793  		nodeReader := bytes.NewReader(nodeBytes)
  2794  		n, err := deserializeLightningNode(nodeReader)
  2795  		if err != nil {
  2796  			return err
  2797  		}
  2798  		n.db = c.db
  2799  
  2800  		node = &n
  2801  
  2802  		return nil
  2803  	}, func() {
  2804  		node = nil
  2805  	})
  2806  	if err != nil {
  2807  		return nil, err
  2808  	}
  2809  
  2810  	return node, nil
  2811  }
  2812  
// graphCacheNode is a struct that wraps a LightningNode in a way that it can be
// cached in the graph cache.
type graphCacheNode struct {
	// pubKeyBytes is the node's raw identity public key.
	pubKeyBytes route.Vertex

	// features is the node's advertised feature vector.
	features *lnwire.FeatureVector
}
  2819  
  2820  // newGraphCacheNode returns a new cache optimized node.
  2821  func newGraphCacheNode(pubKey route.Vertex,
  2822  	features *lnwire.FeatureVector) *graphCacheNode {
  2823  
  2824  	return &graphCacheNode{
  2825  		pubKeyBytes: pubKey,
  2826  		features:    features,
  2827  	}
  2828  }
  2829  
// PubKey returns the node's public identity key as stored in the cache entry.
func (n *graphCacheNode) PubKey() route.Vertex {
	return n.pubKeyBytes
}
  2834  
// Features returns the node's cached feature vector.
func (n *graphCacheNode) Features() *lnwire.FeatureVector {
	return n.features
}
  2839  
// ForEachChannel iterates through all channels of this node, executing the
// passed callback with an edge info structure and the policies of each end
// of the channel. The first edge policy is the outgoing edge *to* the
// connecting node, while the second is the incoming edge *from* the
// connecting node. If the callback returns an error, then the iteration is
// halted with the error propagated back up to the caller.
//
// Unknown policies are passed into the callback as nil values.
func (n *graphCacheNode) ForEachChannel(tx kvdb.RTx,
	cb func(kvdb.RTx, *ChannelEdgeInfo, *ChannelEdgePolicy,
		*ChannelEdgePolicy) error) error {

	// Delegate to the shared traversal helper; no backing db handle is
	// supplied, so a non-nil tx is expected here.
	return nodeTraversal(tx, n.pubKeyBytes[:], nil, cb)
}
  2854  
// Compile-time assertion that graphCacheNode satisfies the GraphCacheNode
// interface.
var _ GraphCacheNode = (*graphCacheNode)(nil)
  2856  
  2857  // HasLightningNode determines if the graph has a vertex identified by the
  2858  // target node identity public key. If the node exists in the database, a
  2859  // timestamp of when the data for the node was lasted updated is returned along
  2860  // with a true boolean. Otherwise, an empty time.Time is returned with a false
  2861  // boolean.
  2862  func (c *ChannelGraph) HasLightningNode(nodePub [33]byte) (time.Time, bool, error) {
  2863  	var (
  2864  		updateTime time.Time
  2865  		exists     bool
  2866  	)
  2867  
  2868  	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
  2869  		// First grab the nodes bucket which stores the mapping from
  2870  		// pubKey to node information.
  2871  		nodes := tx.ReadBucket(nodeBucket)
  2872  		if nodes == nil {
  2873  			return ErrGraphNotFound
  2874  		}
  2875  
  2876  		// If a key for this serialized public key isn't found, we can
  2877  		// exit early.
  2878  		nodeBytes := nodes.Get(nodePub[:])
  2879  		if nodeBytes == nil {
  2880  			exists = false
  2881  			return nil
  2882  		}
  2883  
  2884  		// Otherwise we continue on to obtain the time stamp
  2885  		// representing the last time the data for this node was
  2886  		// updated.
  2887  		nodeReader := bytes.NewReader(nodeBytes)
  2888  		node, err := deserializeLightningNode(nodeReader)
  2889  		if err != nil {
  2890  			return err
  2891  		}
  2892  
  2893  		exists = true
  2894  		updateTime = node.LastUpdate
  2895  		return nil
  2896  	}, func() {
  2897  		updateTime = time.Time{}
  2898  		exists = false
  2899  	})
  2900  	if err != nil {
  2901  		return time.Time{}, exists, err
  2902  	}
  2903  
  2904  	return updateTime, exists, nil
  2905  }
  2906  
// nodeTraversal is used to traverse all channels of a node given by its
// public key and passes channel information into the specified callback. The
// callback receives, per channel: the edge info, the policy for the edge
// leaving the node (outgoing), and the policy for the edge pointing back at
// it (incoming). If tx is nil, a fresh read transaction is opened against db;
// otherwise the supplied transaction is re-used.
func nodeTraversal(tx kvdb.RTx, nodePub []byte, db kvdb.Backend,
	cb func(kvdb.RTx, *ChannelEdgeInfo, *ChannelEdgePolicy, *ChannelEdgePolicy) error) error {

	traversal := func(tx kvdb.RTx) error {
		nodes := tx.ReadBucket(nodeBucket)
		if nodes == nil {
			return ErrGraphNotFound
		}
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNotFound
		}
		edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrGraphNoEdgesFound
		}

		// In order to reach all the edges for this node, we take
		// advantage of the construction of the key-space within the
		// edge bucket. The keys are stored in the form: pubKey ||
		// chanID. Therefore, starting from a chanID of zero, we can
		// scan forward in the bucket, grabbing all the edges for the
		// node. Once the prefix no longer matches, then we know we're
		// done.
		var nodeStart [33 + 8]byte
		copy(nodeStart[:], nodePub)
		copy(nodeStart[33:], chanStart[:])

		// Starting from the key pubKey || 0, we seek forward in the
		// bucket until the retrieved key no longer has the public key
		// as its prefix. This indicates that we've stepped over into
		// another node's edges, so we can terminate our scan.
		edgeCursor := edges.ReadCursor()
		for nodeEdge, _ := edgeCursor.Seek(nodeStart[:]); bytes.HasPrefix(nodeEdge, nodePub); nodeEdge, _ = edgeCursor.Next() {
			// If the prefix still matches, the channel id is
			// returned in nodeEdge. Channel id is used to lookup
			// the node at the other end of the channel and both
			// edge policies.
			chanID := nodeEdge[33:]
			edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID)
			if err != nil {
				return err
			}
			edgeInfo.db = db

			// The policy stored under this node's own key is the
			// outgoing direction.
			outgoingPolicy, err := fetchChanEdgePolicy(
				edges, chanID, nodePub, nodes,
			)
			if err != nil {
				return err
			}

			otherNode, err := edgeInfo.OtherNodeKeyBytes(nodePub)
			if err != nil {
				return err
			}

			// The policy stored under the remote node's key is
			// the incoming direction.
			incomingPolicy, err := fetchChanEdgePolicy(
				edges, chanID, otherNode[:], nodes,
			)
			if err != nil {
				return err
			}

			// Finally, we execute the callback.
			err = cb(tx, &edgeInfo, outgoingPolicy, incomingPolicy)
			if err != nil {
				return err
			}
		}

		return nil
	}

	// If no transaction was provided, then we'll create a new transaction
	// to execute the transaction within.
	if tx == nil {
		return kvdb.View(db, traversal, func() {})
	}

	// Otherwise, we re-use the existing transaction to execute the graph
	// traversal.
	return traversal(tx)
}
  2993  
  2994  // ForEachChannel iterates through all channels of this node, executing the
  2995  // passed callback with an edge info structure and the policies of each end
  2996  // of the channel. The first edge policy is the outgoing edge *to* the
  2997  // connecting node, while the second is the incoming edge *from* the
  2998  // connecting node. If the callback returns an error, then the iteration is
  2999  // halted with the error propagated back up to the caller.
  3000  //
  3001  // Unknown policies are passed into the callback as nil values.
  3002  //
  3003  // If the caller wishes to re-use an existing bboltdb transaction, then it
  3004  // should be passed as the first argument.  Otherwise the first argument should
  3005  // be nil and a fresh transaction will be created to execute the graph
  3006  // traversal.
  3007  func (l *LightningNode) ForEachChannel(tx kvdb.RTx,
  3008  	cb func(kvdb.RTx, *ChannelEdgeInfo, *ChannelEdgePolicy,
  3009  		*ChannelEdgePolicy) error) error {
  3010  
  3011  	nodePub := l.PubKeyBytes[:]
  3012  	db := l.db
  3013  
  3014  	return nodeTraversal(tx, nodePub, db, cb)
  3015  }
  3016  
// ChannelEdgeInfo represents a fully authenticated channel along with all its
// unique attributes. Once an authenticated channel announcement has been
// processed on the network, then an instance of ChannelEdgeInfo encapsulating
// the channels attributes is stored. The other portions relevant to routing
// policy of a channel are stored within a ChannelEdgePolicy for each direction
// of the channel.
type ChannelEdgeInfo struct {
	// ChannelID is the unique channel ID for the channel. The first 3
	// bytes are the block height, the next 3 the index within the block,
	// and the last 2 bytes are the output index for the channel.
	ChannelID uint64

	// ChainHash is the hash that uniquely identifies the chain that this
	// channel was opened within.
	//
	// TODO(roasbeef): need to modify db keying for multi-chain
	//  * must add chain hash to prefix as well
	ChainHash chainhash.Hash

	// NodeKey1Bytes is the raw public key of the first node.
	NodeKey1Bytes [33]byte
	nodeKey1      *secp256k1.PublicKey

	// NodeKey2Bytes is the raw public key of the second node.
	NodeKey2Bytes [33]byte
	nodeKey2      *secp256k1.PublicKey

	// DecredKey1Bytes is the raw Decred multi-sig public key of the first
	// node.
	DecredKey1Bytes [33]byte
	decredKey1      *secp256k1.PublicKey

	// DecredKey2Bytes is the raw Decred multi-sig public key of the second
	// node.
	DecredKey2Bytes [33]byte
	decredKey2      *secp256k1.PublicKey

	// Features is an opaque byte slice that encodes the set of channel
	// specific features that this channel edge supports.
	Features []byte

	// AuthProof is the authentication proof for this channel. This proof
	// contains a set of signatures binding four identities, which attests
	// to the legitimacy of the advertised channel.
	AuthProof *ChannelAuthProof

	// ChannelPoint is the funding outpoint of the channel. This can be
	// used to uniquely identify the channel within the channel graph.
	ChannelPoint wire.OutPoint

	// Capacity is the total capacity of the channel, this is determined by
	// the value output in the outpoint that created this channel.
	Capacity dcrutil.Amount

	// ExtraOpaqueData is the set of data that was appended to this
	// message, some of which we may not actually know how to iterate or
	// parse. By holding onto this data, we ensure that we're able to
	// properly validate the set of signatures that cover these new fields,
	// and ensure we're able to make upgrades to the network in a forwards
	// compatible manner.
	ExtraOpaqueData []byte

	// db is the backing database handle, used by FetchOtherNode and
	// related lookups.
	db kvdb.Backend
}
  3079  
  3080  // AddNodeKeys is a setter-like method that can be used to replace the set of
  3081  // keys for the target ChannelEdgeInfo.
  3082  func (c *ChannelEdgeInfo) AddNodeKeys(nodeKey1, nodeKey2, decredKey1,
  3083  	decredKey2 *secp256k1.PublicKey) {
  3084  
  3085  	c.nodeKey1 = nodeKey1
  3086  	copy(c.NodeKey1Bytes[:], c.nodeKey1.SerializeCompressed())
  3087  
  3088  	c.nodeKey2 = nodeKey2
  3089  	copy(c.NodeKey2Bytes[:], nodeKey2.SerializeCompressed())
  3090  
  3091  	c.decredKey1 = decredKey1
  3092  	copy(c.DecredKey1Bytes[:], c.decredKey1.SerializeCompressed())
  3093  
  3094  	c.decredKey2 = decredKey2
  3095  	copy(c.DecredKey2Bytes[:], decredKey2.SerializeCompressed())
  3096  }
  3097  
  3098  // NodeKey1 is the identity public key of the "first" node that was involved in
  3099  // the creation of this channel. A node is considered "first" if the
  3100  // lexicographical ordering the its serialized public key is "smaller" than
  3101  // that of the other node involved in channel creation.
  3102  //
  3103  // NOTE: By having this method to access an attribute, we ensure we only need
  3104  // to fully deserialize the pubkey if absolutely necessary.
  3105  func (c *ChannelEdgeInfo) NodeKey1() (*secp256k1.PublicKey, error) {
  3106  	if c.nodeKey1 != nil {
  3107  		return c.nodeKey1, nil
  3108  	}
  3109  
  3110  	key, err := secp256k1.ParsePubKey(c.NodeKey1Bytes[:])
  3111  	if err != nil {
  3112  		return nil, err
  3113  	}
  3114  	c.nodeKey1 = key
  3115  
  3116  	return key, nil
  3117  }
  3118  
  3119  // NodeKey2 is the identity public key of the "second" node that was
  3120  // involved in the creation of this channel. A node is considered
  3121  // "second" if the lexicographical ordering the its serialized public
  3122  // key is "larger" than that of the other node involved in channel
  3123  // creation.
  3124  //
  3125  // NOTE: By having this method to access an attribute, we ensure we only need
  3126  // to fully deserialize the pubkey if absolutely necessary.
  3127  func (c *ChannelEdgeInfo) NodeKey2() (*secp256k1.PublicKey, error) {
  3128  	if c.nodeKey2 != nil {
  3129  		return c.nodeKey2, nil
  3130  	}
  3131  
  3132  	key, err := secp256k1.ParsePubKey(c.NodeKey2Bytes[:])
  3133  	if err != nil {
  3134  		return nil, err
  3135  	}
  3136  	c.nodeKey2 = key
  3137  
  3138  	return key, nil
  3139  }
  3140  
  3141  // DecredKey1 is the Decred multi-sig key belonging to the first
  3142  // node, that was involved in the funding transaction that originally
  3143  // created the channel that this struct represents.
  3144  //
  3145  // NOTE: By having this method to access an attribute, we ensure we only need
  3146  // to fully deserialize the pubkey if absolutely necessary.
  3147  func (c *ChannelEdgeInfo) DecredKey1() (*secp256k1.PublicKey, error) {
  3148  	if c.decredKey1 != nil {
  3149  		return c.decredKey1, nil
  3150  	}
  3151  
  3152  	key, err := secp256k1.ParsePubKey(c.DecredKey1Bytes[:])
  3153  	if err != nil {
  3154  		return nil, err
  3155  	}
  3156  	c.decredKey1 = key
  3157  
  3158  	return key, nil
  3159  }
  3160  
  3161  // DecredKey2 is the Decred multi-sig key belonging to the second
  3162  // node, that was involved in the funding transaction that originally
  3163  // created the channel that this struct represents.
  3164  //
  3165  // NOTE: By having this method to access an attribute, we ensure we only need
  3166  // to fully deserialize the pubkey if absolutely necessary.
  3167  func (c *ChannelEdgeInfo) DecredKey2() (*secp256k1.PublicKey, error) {
  3168  	if c.decredKey2 != nil {
  3169  		return c.decredKey2, nil
  3170  	}
  3171  
  3172  	key, err := secp256k1.ParsePubKey(c.DecredKey2Bytes[:])
  3173  	if err != nil {
  3174  		return nil, err
  3175  	}
  3176  	c.decredKey2 = key
  3177  
  3178  	return key, nil
  3179  }
  3180  
  3181  // OtherNodeKeyBytes returns the node key bytes of the other end of
  3182  // the channel.
  3183  func (c *ChannelEdgeInfo) OtherNodeKeyBytes(thisNodeKey []byte) (
  3184  	[33]byte, error) {
  3185  
  3186  	switch {
  3187  	case bytes.Equal(c.NodeKey1Bytes[:], thisNodeKey):
  3188  		return c.NodeKey2Bytes, nil
  3189  	case bytes.Equal(c.NodeKey2Bytes[:], thisNodeKey):
  3190  		return c.NodeKey1Bytes, nil
  3191  	default:
  3192  		return [33]byte{}, fmt.Errorf("node not participating in this channel")
  3193  	}
  3194  }
  3195  
  3196  // FetchOtherNode attempts to fetch the full LightningNode that's opposite of
  3197  // the target node in the channel. This is useful when one knows the pubkey of
  3198  // one of the nodes, and wishes to obtain the full LightningNode for the other
  3199  // end of the channel.
  3200  func (c *ChannelEdgeInfo) FetchOtherNode(tx kvdb.RTx, thisNodeKey []byte) (*LightningNode, error) {
  3201  
  3202  	// Ensure that the node passed in is actually a member of the channel.
  3203  	var targetNodeBytes [33]byte
  3204  	switch {
  3205  	case bytes.Equal(c.NodeKey1Bytes[:], thisNodeKey):
  3206  		targetNodeBytes = c.NodeKey2Bytes
  3207  	case bytes.Equal(c.NodeKey2Bytes[:], thisNodeKey):
  3208  		targetNodeBytes = c.NodeKey1Bytes
  3209  	default:
  3210  		return nil, fmt.Errorf("node not participating in this channel")
  3211  	}
  3212  
  3213  	var targetNode *LightningNode
  3214  	fetchNodeFunc := func(tx kvdb.RTx) error {
  3215  		// First grab the nodes bucket which stores the mapping from
  3216  		// pubKey to node information.
  3217  		nodes := tx.ReadBucket(nodeBucket)
  3218  		if nodes == nil {
  3219  			return ErrGraphNotFound
  3220  		}
  3221  
  3222  		node, err := fetchLightningNode(nodes, targetNodeBytes[:])
  3223  		if err != nil {
  3224  			return err
  3225  		}
  3226  		node.db = c.db
  3227  
  3228  		targetNode = &node
  3229  
  3230  		return nil
  3231  	}
  3232  
  3233  	// If the transaction is nil, then we'll need to create a new one,
  3234  	// otherwise we can use the existing db transaction.
  3235  	var err error
  3236  	if tx == nil {
  3237  		err = kvdb.View(c.db, fetchNodeFunc, func() { targetNode = nil })
  3238  	} else {
  3239  		err = fetchNodeFunc(tx)
  3240  	}
  3241  
  3242  	return targetNode, err
  3243  }
  3244  
// ChannelAuthProof is the authentication proof (the signature portion) for a
// channel. Using the four signatures contained in the struct, and some
// auxiliary knowledge (the funding script, node identities, and outpoint) nodes
// on the network are able to validate the authenticity and existence of a
// channel. Each of these signatures signs the following digest: chanID ||
// nodeID1 || nodeID2 || decredKey1|| decredKey2 || 2-byte-feature-len ||
// features.
type ChannelAuthProof struct {
	// nodeSig1 is a cached instance of the first node signature, parsed
	// lazily from NodeSig1Bytes by Node1Sig.
	nodeSig1 *ecdsa.Signature

	// NodeSig1Bytes are the raw bytes of the first node signature encoded
	// in DER format.
	NodeSig1Bytes []byte

	// nodeSig2 is a cached instance of the second node signature, parsed
	// lazily from NodeSig2Bytes by Node2Sig.
	nodeSig2 *ecdsa.Signature

	// NodeSig2Bytes are the raw bytes of the second node signature
	// encoded in DER format.
	NodeSig2Bytes []byte

	// decredSig1 is a cached instance of the first decred signature,
	// parsed lazily from DecredSig1Bytes by DecredSig1.
	decredSig1 *ecdsa.Signature

	// DecredSig1Bytes are the raw bytes of the first decred signature
	// encoded in DER format.
	DecredSig1Bytes []byte

	// decredSig2 is a cached instance of the second decred signature,
	// parsed lazily from DecredSig2Bytes by DecredSig2.
	decredSig2 *ecdsa.Signature

	// DecredSig2Bytes are the raw bytes of the second decred signature
	// encoded in DER format.
	DecredSig2Bytes []byte
}
  3281  
  3282  // Node1Sig is the signature using the identity key of the node that is first
  3283  // in a lexicographical ordering of the serialized public keys of the two nodes
  3284  // that created the channel.
  3285  //
  3286  // NOTE: By having this method to access an attribute, we ensure we only need
  3287  // to fully deserialize the signature if absolutely necessary.
  3288  func (c *ChannelAuthProof) Node1Sig() (*ecdsa.Signature, error) {
  3289  	if c.nodeSig1 != nil {
  3290  		return c.nodeSig1, nil
  3291  	}
  3292  
  3293  	sig, err := ecdsa.ParseDERSignature(c.NodeSig1Bytes)
  3294  	if err != nil {
  3295  		return nil, err
  3296  	}
  3297  
  3298  	c.nodeSig1 = sig
  3299  
  3300  	return sig, nil
  3301  }
  3302  
  3303  // Node2Sig is the signature using the identity key of the node that is second
  3304  // in a lexicographical ordering of the serialized public keys of the two nodes
  3305  // that created the channel.
  3306  //
  3307  // NOTE: By having this method to access an attribute, we ensure we only need
  3308  // to fully deserialize the signature if absolutely necessary.
  3309  func (c *ChannelAuthProof) Node2Sig() (*ecdsa.Signature, error) {
  3310  	if c.nodeSig2 != nil {
  3311  		return c.nodeSig2, nil
  3312  	}
  3313  
  3314  	sig, err := ecdsa.ParseDERSignature(c.NodeSig2Bytes)
  3315  	if err != nil {
  3316  		return nil, err
  3317  	}
  3318  
  3319  	c.nodeSig2 = sig
  3320  
  3321  	return sig, nil
  3322  }
  3323  
  3324  // DecredSig1 is the signature using the public key of the first node that was
  3325  // used in the channel's multi-sig output.
  3326  //
  3327  // NOTE: By having this method to access an attribute, we ensure we only need
  3328  // to fully deserialize the signature if absolutely necessary.
  3329  func (c *ChannelAuthProof) DecredSig1() (*ecdsa.Signature, error) {
  3330  	if c.decredSig1 != nil {
  3331  		return c.decredSig1, nil
  3332  	}
  3333  
  3334  	sig, err := ecdsa.ParseDERSignature(c.DecredSig1Bytes)
  3335  	if err != nil {
  3336  		return nil, err
  3337  	}
  3338  
  3339  	c.decredSig1 = sig
  3340  
  3341  	return sig, nil
  3342  }
  3343  
  3344  // DecredSig2 is the signature using the public key of the second node that
  3345  // was used in the channel's multi-sig output.
  3346  //
  3347  // NOTE: By having this method to access an attribute, we ensure we only need
  3348  // to fully deserialize the signature if absolutely necessary.
  3349  func (c *ChannelAuthProof) DecredSig2() (*ecdsa.Signature, error) {
  3350  	if c.decredSig2 != nil {
  3351  		return c.decredSig2, nil
  3352  	}
  3353  
  3354  	sig, err := ecdsa.ParseDERSignature(c.DecredSig2Bytes)
  3355  	if err != nil {
  3356  		return nil, err
  3357  	}
  3358  
  3359  	c.decredSig2 = sig
  3360  
  3361  	return sig, nil
  3362  }
  3363  
  3364  // IsEmpty check is the authentication proof is empty Proof is empty if at
  3365  // least one of the signatures are equal to nil.
  3366  func (c *ChannelAuthProof) IsEmpty() bool {
  3367  	return len(c.NodeSig1Bytes) == 0 ||
  3368  		len(c.NodeSig2Bytes) == 0 ||
  3369  		len(c.DecredSig1Bytes) == 0 ||
  3370  		len(c.DecredSig2Bytes) == 0
  3371  }
  3372  
// ChannelEdgePolicy represents a *directed* edge within the channel graph. For
// each channel in the database, there are two distinct edges: one for each
// possible direction of travel along the channel. The edges themselves hold
// information concerning fees, and minimum time-lock information which is
// utilized during path finding.
type ChannelEdgePolicy struct {
	// SigBytes is the raw bytes of the signature of the channel edge
	// policy. We'll only parse these if the caller needs to access the
	// signature for validation purposes. Do not set SigBytes directly, but
	// use SetSigBytes instead to make sure that the cache is invalidated.
	SigBytes []byte

	// sig is a cached fully parsed signature.
	sig *ecdsa.Signature

	// ChannelID is the unique channel ID for the channel. The first 3
	// bytes are the block height, the next 3 the index within the block,
	// and the last 2 bytes are the output index for the channel.
	ChannelID uint64

	// LastUpdate is the last time an authenticated edge for this channel
	// was received.
	LastUpdate time.Time

	// MessageFlags is a bitfield which indicates the presence of optional
	// fields (like max_htlc) in the policy.
	MessageFlags lnwire.ChanUpdateMsgFlags

	// ChannelFlags is a bitfield which signals the capabilities of the
	// channel as well as the directed edge this update applies to.
	ChannelFlags lnwire.ChanUpdateChanFlags

	// TimeLockDelta is the number of blocks this node will subtract from
	// the expiry of an incoming HTLC. This value expresses the time buffer
	// the node would like to have for HTLC exchanges.
	TimeLockDelta uint16

	// MinHTLC is the smallest value HTLC this node will forward, expressed
	// in milli-atoms.
	MinHTLC lnwire.MilliAtom

	// MaxHTLC is the largest value HTLC this node will forward, expressed
	// in milli-atoms.
	MaxHTLC lnwire.MilliAtom

	// FeeBaseMAtoms is the base HTLC fee that will be charged for
	// forwarding ANY HTLC, expressed in milli-atoms.
	FeeBaseMAtoms lnwire.MilliAtom

	// FeeProportionalMillionths is the rate that the node will charge for
	// HTLCs, expressed as milli-atoms charged per million milli-atoms
	// forwarded.
	FeeProportionalMillionths lnwire.MilliAtom

	// Node is the LightningNode that this directed edge leads to. Using
	// this pointer the channel graph can further be traversed.
	Node *LightningNode

	// ExtraOpaqueData is the set of data that was appended to this
	// message, some of which we may not actually know how to iterate or
	// parse. By holding onto this data, we ensure that we're able to
	// properly validate the set of signatures that cover these new fields,
	// and ensure we're able to make upgrades to the network in a forwards
	// compatible manner.
	ExtraOpaqueData []byte

	// db is the database backend this policy was loaded from, used to
	// resolve related records on demand.
	db kvdb.Backend
}
  3440  
  3441  // Signature is a channel announcement signature, which is needed for proper
  3442  // edge policy announcement.
  3443  //
  3444  // NOTE: By having this method to access an attribute, we ensure we only need
  3445  // to fully deserialize the signature if absolutely necessary.
  3446  func (c *ChannelEdgePolicy) Signature() (*ecdsa.Signature, error) {
  3447  	if c.sig != nil {
  3448  		return c.sig, nil
  3449  	}
  3450  
  3451  	sig, err := ecdsa.ParseDERSignature(c.SigBytes)
  3452  	if err != nil {
  3453  		return nil, err
  3454  	}
  3455  
  3456  	c.sig = sig
  3457  
  3458  	return sig, nil
  3459  }
  3460  
  3461  // SetSigBytes updates the signature and invalidates the cached parsed
  3462  // signature.
  3463  func (c *ChannelEdgePolicy) SetSigBytes(sig []byte) {
  3464  	c.SigBytes = sig
  3465  	c.sig = nil
  3466  }
  3467  
  3468  // IsDisabled determines whether the edge has the disabled bit set.
  3469  func (c *ChannelEdgePolicy) IsDisabled() bool {
  3470  	return c.ChannelFlags.IsDisabled()
  3471  }
  3472  
  3473  // ComputeFee computes the fee to forward an HTLC of `amt` milli-atoms over the
  3474  // passed active payment channel. This value is currently computed as specified
  3475  // in BOLT07, but will likely change in the near future.
  3476  func (c *ChannelEdgePolicy) ComputeFee(
  3477  	amt lnwire.MilliAtom) lnwire.MilliAtom {
  3478  
  3479  	return c.FeeBaseMAtoms + (amt*c.FeeProportionalMillionths)/feeRateParts
  3480  }
  3481  
  3482  // divideCeil divides dividend by factor and rounds the result up.
  3483  func divideCeil(dividend, factor lnwire.MilliAtom) lnwire.MilliAtom {
  3484  	return (dividend + factor - 1) / factor
  3485  }
  3486  
  3487  // ComputeFeeFromIncoming computes the fee to forward an HTLC given the incoming
  3488  // amount.
  3489  func (c *ChannelEdgePolicy) ComputeFeeFromIncoming(
  3490  	incomingAmt lnwire.MilliAtom) lnwire.MilliAtom {
  3491  
  3492  	return incomingAmt - divideCeil(
  3493  		feeRateParts*(incomingAmt-c.FeeBaseMAtoms),
  3494  		feeRateParts+c.FeeProportionalMillionths,
  3495  	)
  3496  }
  3497  
// FetchChannelEdgesByOutpoint attempts to lookup the two directed edges for
// the channel identified by the funding outpoint. If the channel can't be
// found, then ErrEdgeNotFound is returned. A struct which houses the general
// information for the channel itself is returned as well as two structs that
// contain the routing policies for the channel in either direction.
func (c *ChannelGraph) FetchChannelEdgesByOutpoint(op *wire.OutPoint,
) (*ChannelEdgeInfo, *ChannelEdgePolicy, *ChannelEdgePolicy, error) {

	var (
		edgeInfo *ChannelEdgeInfo
		policy1  *ChannelEdgePolicy
		policy2  *ChannelEdgePolicy
	)

	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		// First, grab the node bucket. This will be used to populate
		// the Node pointers in each edge read from disk.
		nodes := tx.ReadBucket(nodeBucket)
		if nodes == nil {
			return ErrGraphNotFound
		}

		// Next, grab the edge bucket which stores the edges, and also
		// the index itself so we can group the directed edges together
		// logically.
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrGraphNoEdgesFound
		}

		// If the channel's outpoint doesn't exist within the outpoint
		// index, then the edge does not exist.
		chanIndex := edges.NestedReadBucket(channelPointBucket)
		if chanIndex == nil {
			return ErrGraphNoEdgesFound
		}

		// The outpoint index is keyed by the serialized outpoint, so
		// serialize the requested one to perform the lookup.
		var b bytes.Buffer
		if err := writeOutpoint(&b, op); err != nil {
			return err
		}
		chanID := chanIndex.Get(b.Bytes())
		if chanID == nil {
			return ErrEdgeNotFound
		}

		// If the channel is found to exist, then we'll first retrieve
		// the general information for the channel.
		edge, err := fetchChanEdgeInfo(edgeIndex, chanID)
		if err != nil {
			return err
		}
		edgeInfo = &edge
		edgeInfo.db = c.db

		// Once we have the information about the channels' parameters,
		// we'll fetch the routing policies for each for the directed
		// edges.
		e1, e2, err := fetchChanEdgePolicies(
			edgeIndex, edges, nodes, chanID, c.db,
		)
		if err != nil {
			return err
		}

		policy1 = e1
		policy2 = e2
		return nil
	}, func() {
		// Reset closure: clears any partial results so a retried
		// transaction starts from a clean slate.
		edgeInfo = nil
		policy1 = nil
		policy2 = nil
	})
	if err != nil {
		return nil, nil, nil, err
	}

	return edgeInfo, policy1, policy2, nil
}
  3580  
// FetchChannelEdgesByID attempts to lookup the two directed edges for the
// channel identified by the channel ID. If the channel can't be found, then
// ErrEdgeNotFound is returned. A struct which houses the general information
// for the channel itself is returned as well as two structs that contain the
// routing policies for the channel in either direction.
//
// ErrZombieEdge can be returned if the edge is currently marked as a zombie
// within the database. In this case, the ChannelEdgePolicy's will be nil, and
// the ChannelEdgeInfo will only include the public keys of each node.
func (c *ChannelGraph) FetchChannelEdgesByID(chanID uint64,
) (*ChannelEdgeInfo, *ChannelEdgePolicy, *ChannelEdgePolicy, error) {

	var (
		edgeInfo  *ChannelEdgeInfo
		policy1   *ChannelEdgePolicy
		policy2   *ChannelEdgePolicy
		channelID [8]byte
	)

	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		// First, grab the node bucket. This will be used to populate
		// the Node pointers in each edge read from disk.
		nodes := tx.ReadBucket(nodeBucket)
		if nodes == nil {
			return ErrGraphNotFound
		}

		// Next, grab the edge bucket which stores the edges, and also
		// the index itself so we can group the directed edges together
		// logically.
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrGraphNoEdgesFound
		}

		// The edge index is keyed by the serialized channel ID.
		byteOrder.PutUint64(channelID[:], chanID)

		// Now, attempt to fetch edge.
		edge, err := fetchChanEdgeInfo(edgeIndex, channelID[:])

		// If it doesn't exist, we'll quickly check our zombie index to
		// see if we've previously marked it as so.
		if err == ErrEdgeNotFound {
			// If the zombie index doesn't exist, or the edge is not
			// marked as a zombie within it, then we'll return the
			// original ErrEdgeNotFound error.
			zombieIndex := edges.NestedReadBucket(zombieBucket)
			if zombieIndex == nil {
				return ErrEdgeNotFound
			}

			isZombie, pubKey1, pubKey2 := isZombieEdge(
				zombieIndex, chanID,
			)
			if !isZombie {
				return ErrEdgeNotFound
			}

			// Otherwise, the edge is marked as a zombie, so we'll
			// populate the edge info with the public keys of each
			// party as this is the only information we have about
			// it and return an error signaling so.
			edgeInfo = &ChannelEdgeInfo{
				NodeKey1Bytes: pubKey1,
				NodeKey2Bytes: pubKey2,
			}
			return ErrZombieEdge
		}

		// Otherwise, we'll just return the error if any.
		if err != nil {
			return err
		}

		edgeInfo = &edge
		edgeInfo.db = c.db

		// Then we'll attempt to fetch the accompanying policies of this
		// edge.
		e1, e2, err := fetchChanEdgePolicies(
			edgeIndex, edges, nodes, channelID[:], c.db,
		)
		if err != nil {
			return err
		}

		policy1 = e1
		policy2 = e2
		return nil
	}, func() {
		// Reset closure: clears any partial results so a retried
		// transaction starts from a clean slate.
		edgeInfo = nil
		policy1 = nil
		policy2 = nil
	})
	// ErrZombieEdge is special-cased: the partially populated edgeInfo is
	// still returned alongside the error so callers can learn the node
	// keys of the zombie channel.
	if err == ErrZombieEdge {
		return edgeInfo, nil, nil, err
	}
	if err != nil {
		return nil, nil, nil, err
	}

	return edgeInfo, policy1, policy2, nil
}
  3688  
// IsPublicNode is a helper method that determines whether the node with the
// given public key is seen as a public node in the graph from the graph's
// source node's point of view.
func (c *ChannelGraph) IsPublicNode(pubKey [33]byte) (bool, error) {
	var nodeIsPublic bool
	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		nodes := tx.ReadBucket(nodeBucket)
		if nodes == nil {
			return ErrGraphNodesNotFound
		}

		// The source node's pubkey is needed as the vantage point for
		// the publicness check below.
		ourPubKey := nodes.Get(sourceKey)
		if ourPubKey == nil {
			return ErrSourceNodeNotSet
		}
		node, err := fetchLightningNode(nodes, pubKey[:])
		if err != nil {
			return err
		}

		// Delegate the actual determination to the node itself, which
		// inspects its channels relative to our source node.
		nodeIsPublic, err = node.isPublic(tx, ourPubKey)
		return err
	}, func() {
		// Reset closure: clears the result in case the transaction is
		// retried.
		nodeIsPublic = false
	})
	if err != nil {
		return false, err
	}

	return nodeIsPublic, nil
}
  3719  
// genMultiSigP2SH generates the p2sh'd multisig script for 2 of 2 pubkeys.
//
// NOTE(review): earlier comments here said "p2wsh", but the code below builds
// a plain pay-to-script-hash script via input.ScriptHashPkScript, matching the
// function's name.
func genMultiSigP2SH(aPub, bPub []byte) ([]byte, error) {
	if len(aPub) != 33 || len(bPub) != 33 {
		return nil, fmt.Errorf("pubkey size error. Compressed " +
			"pubkeys only")
	}

	// Swap to sort pubkeys if needed. Keys are sorted in lexicographical
	// order. The signatures within the scriptSig must also adhere to the
	// order, ensuring that the signatures for each public key appears in
	// the proper order on the stack.
	if bytes.Compare(aPub, bPub) == 1 {
		aPub, bPub = bPub, aPub
	}

	// First, we'll generate the redeem script for the multi-sig:
	//  * OP_2 <pubkeyA> <pubkeyB> OP_2 OP_CHECKMULTISIG
	bldr := txscript.NewScriptBuilder()
	bldr.AddOp(txscript.OP_2)
	bldr.AddData(aPub) // Add both pubkeys (sorted).
	bldr.AddData(bPub)
	bldr.AddOp(txscript.OP_2)
	bldr.AddOp(txscript.OP_CHECKMULTISIG)
	witnessScript, err := bldr.Script()
	if err != nil {
		return nil, err
	}

	// With the redeem script generated, we'll now turn it into a p2sh
	// script:
	//  * OP_HASH160 <HASH160(script)> OP_EQUAL
	return input.ScriptHashPkScript(witnessScript)
}
  3752  
// EdgePoint couples the outpoint of a channel with the funding script that it
// creates. The FilteredChainView will use this to watch for spends of this
// edge point on chain. We require both of these values as depending on the
// concrete implementation, either the pkScript, or the out point will be used.
type EdgePoint struct {
	// FundingPkScript is the p2sh multi-sig script of the target channel,
	// as produced by genMultiSigP2SH.
	FundingPkScript []byte

	// OutPoint is the outpoint of the target channel.
	OutPoint wire.OutPoint
}
  3764  
  3765  // String returns a human readable version of the target EdgePoint. We return
  3766  // the outpoint directly as it is enough to uniquely identify the edge point.
  3767  func (e *EdgePoint) String() string {
  3768  	return e.OutPoint.String()
  3769  }
  3770  
// ChannelView returns the verifiable edge information for each active channel
// within the known channel graph. The set of UTXO's (along with their scripts)
// returned are the ones that need to be watched on chain to detect channel
// closes on the resident blockchain.
func (c *ChannelGraph) ChannelView() ([]EdgePoint, error) {
	var edgePoints []EdgePoint
	if err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		// We're going to iterate over the entire channel index, so
		// we'll need to fetch the edgeBucket to get to the index as
		// it's a sub-bucket.
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		chanIndex := edges.NestedReadBucket(channelPointBucket)
		if chanIndex == nil {
			return ErrGraphNoEdgesFound
		}
		edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrGraphNoEdgesFound
		}

		// Once we have the proper bucket, we'll range over each key
		// (which is the channel point for the channel) and decode it,
		// accumulating each entry.
		return chanIndex.ForEach(func(chanPointBytes, chanID []byte) error {
			chanPointReader := bytes.NewReader(chanPointBytes)

			var chanPoint wire.OutPoint
			err := readOutpoint(chanPointReader, &chanPoint)
			if err != nil {
				return err
			}

			// The edge info holds the multi-sig keys needed to
			// re-derive the channel's funding script.
			edgeInfo, err := fetchChanEdgeInfo(
				edgeIndex, chanID,
			)
			if err != nil {
				return err
			}

			// TODO(decred) ideally this should use the same thing as in
			// lnwallet's script_utils.go file.
			pkScript, err := genMultiSigP2SH(
				edgeInfo.DecredKey1Bytes[:],
				edgeInfo.DecredKey2Bytes[:],
			)
			if err != nil {
				return err
			}

			edgePoints = append(edgePoints, EdgePoint{
				FundingPkScript: pkScript,
				OutPoint:        chanPoint,
			})

			return nil
		})
	}, func() {
		// Reset closure: discards partial accumulation if the
		// transaction is retried.
		edgePoints = nil
	}); err != nil {
		return nil, err
	}

	return edgePoints, nil
}
  3838  
  3839  // NewChannelEdgePolicy returns a new blank ChannelEdgePolicy.
  3840  func (c *ChannelGraph) NewChannelEdgePolicy() *ChannelEdgePolicy {
  3841  	return &ChannelEdgePolicy{db: c.db}
  3842  }
  3843  
// MarkEdgeZombie attempts to mark a channel identified by its channel ID as a
// zombie. This method is used on an ad-hoc basis, when channels need to be
// marked as zombies outside the normal pruning cycle.
func (c *ChannelGraph) MarkEdgeZombie(chanID uint64,
	pubKey1, pubKey2 [33]byte) error {

	// cacheMu serializes all cache mutations against other graph writers.
	c.cacheMu.Lock()
	defer c.cacheMu.Unlock()

	err := kvdb.Batch(c.db, func(tx kvdb.RwTx) error {
		edges := tx.ReadWriteBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		// Lazily create the zombie index on first use.
		zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
		if err != nil {
			return fmt.Errorf("unable to create zombie "+
				"bucket: %w", err)
		}

		// Evict the channel from the in-memory graph cache so path
		// finding no longer considers it.
		if c.graphCache != nil {
			c.graphCache.RemoveChannel(pubKey1, pubKey2, chanID)
		}

		return markEdgeZombie(zombieIndex, chanID, pubKey1, pubKey2)
	})
	if err != nil {
		return err
	}

	// Invalidate any cached entries for this channel now that it's been
	// marked as a zombie.
	c.rejectCache.remove(chanID)
	c.chanCache.remove(chanID)

	return nil
}
  3879  
  3880  // markEdgeZombie marks an edge as a zombie within our zombie index. The public
  3881  // keys should represent the node public keys of the two parties involved in
  3882  // the edge.
  3883  func markEdgeZombie(zombieIndex kvdb.RwBucket, chanID uint64, pubKey1,
  3884  	pubKey2 [33]byte) error {
  3885  
  3886  	var k [8]byte
  3887  	byteOrder.PutUint64(k[:], chanID)
  3888  
  3889  	var v [66]byte
  3890  	copy(v[:33], pubKey1[:])
  3891  	copy(v[33:], pubKey2[:])
  3892  
  3893  	return zombieIndex.Put(k[:], v[:])
  3894  }
  3895  
// MarkEdgeLive clears an edge from our zombie index, deeming it as live.
func (c *ChannelGraph) MarkEdgeLive(chanID uint64) error {
	// cacheMu serializes all cache mutations against other graph writers.
	c.cacheMu.Lock()
	defer c.cacheMu.Unlock()

	err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
		edges := tx.ReadWriteBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		// If the zombie index was never created, there's nothing to
		// clear, which is not an error.
		zombieIndex := edges.NestedReadWriteBucket(zombieBucket)
		if zombieIndex == nil {
			return nil
		}

		var k [8]byte
		byteOrder.PutUint64(k[:], chanID)
		return zombieIndex.Delete(k[:])
	}, func() {})
	if err != nil {
		return err
	}

	// Invalidate any stale cached entries for this channel.
	c.rejectCache.remove(chanID)
	c.chanCache.remove(chanID)

	// We need to add the channel back into our graph cache, otherwise we
	// won't use it for path finding.
	edgeInfos, err := c.FetchChanInfos([]uint64{chanID})
	if err != nil {
		return err
	}
	if c.graphCache != nil {
		for _, edgeInfo := range edgeInfos {
			c.graphCache.AddChannel(
				edgeInfo.Info, edgeInfo.Policy1,
				edgeInfo.Policy2,
			)
		}
	}

	return nil
}
  3939  
  3940  // IsZombieEdge returns whether the edge is considered zombie. If it is a
  3941  // zombie, then the two node public keys corresponding to this edge are also
  3942  // returned.
  3943  func (c *ChannelGraph) IsZombieEdge(chanID uint64) (bool, [33]byte, [33]byte) {
  3944  	var (
  3945  		isZombie         bool
  3946  		pubKey1, pubKey2 [33]byte
  3947  	)
  3948  
  3949  	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
  3950  		edges := tx.ReadBucket(edgeBucket)
  3951  		if edges == nil {
  3952  			return ErrGraphNoEdgesFound
  3953  		}
  3954  		zombieIndex := edges.NestedReadBucket(zombieBucket)
  3955  		if zombieIndex == nil {
  3956  			return nil
  3957  		}
  3958  
  3959  		isZombie, pubKey1, pubKey2 = isZombieEdge(zombieIndex, chanID)
  3960  		return nil
  3961  	}, func() {
  3962  		isZombie = false
  3963  		pubKey1 = [33]byte{}
  3964  		pubKey2 = [33]byte{}
  3965  	})
  3966  	if err != nil {
  3967  		return false, [33]byte{}, [33]byte{}
  3968  	}
  3969  
  3970  	return isZombie, pubKey1, pubKey2
  3971  }
  3972  
  3973  // isZombieEdge returns whether an entry exists for the given channel in the
  3974  // zombie index. If an entry exists, then the two node public keys corresponding
  3975  // to this edge are also returned.
  3976  func isZombieEdge(zombieIndex kvdb.RBucket,
  3977  	chanID uint64) (bool, [33]byte, [33]byte) {
  3978  
  3979  	var k [8]byte
  3980  	byteOrder.PutUint64(k[:], chanID)
  3981  
  3982  	v := zombieIndex.Get(k[:])
  3983  	if v == nil {
  3984  		return false, [33]byte{}, [33]byte{}
  3985  	}
  3986  
  3987  	var pubKey1, pubKey2 [33]byte
  3988  	copy(pubKey1[:], v[:33])
  3989  	copy(pubKey2[:], v[33:])
  3990  
  3991  	return true, pubKey1, pubKey2
  3992  }
  3993  
  3994  // NumZombies returns the current number of zombie channels in the graph.
  3995  func (c *ChannelGraph) NumZombies() (uint64, error) {
  3996  	var numZombies uint64
  3997  	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
  3998  		edges := tx.ReadBucket(edgeBucket)
  3999  		if edges == nil {
  4000  			return nil
  4001  		}
  4002  		zombieIndex := edges.NestedReadBucket(zombieBucket)
  4003  		if zombieIndex == nil {
  4004  			return nil
  4005  		}
  4006  
  4007  		return zombieIndex.ForEach(func(_, _ []byte) error {
  4008  			numZombies++
  4009  			return nil
  4010  		})
  4011  	}, func() {
  4012  		numZombies = 0
  4013  	})
  4014  	if err != nil {
  4015  		return 0, err
  4016  	}
  4017  
  4018  	return numZombies, nil
  4019  }
  4020  
// putLightningNode serializes node into the node bucket, keyed by its
// compressed public key, and maintains the alias and update-time indexes.
//
// The on-disk layout written here must stay in sync with
// deserializeLightningNode: LastUpdate (8 bytes) || pubkey (33 bytes) ||
// HaveNodeAnnouncement flag (2 bytes), followed by the announcement fields
// only when the flag is 1.
func putLightningNode(nodeBucket kvdb.RwBucket, aliasBucket kvdb.RwBucket,
	updateIndex kvdb.RwBucket, node *LightningNode) error {

	var (
		scratch [16]byte
		b       bytes.Buffer
	)

	pub, err := node.PubKey()
	if err != nil {
		return err
	}
	nodePub := pub.SerializeCompressed()

	// If the node has the update time set, write it, else write 0.
	updateUnix := uint64(0)
	if node.LastUpdate.Unix() > 0 {
		updateUnix = uint64(node.LastUpdate.Unix())
	}

	byteOrder.PutUint64(scratch[:8], updateUnix)
	if _, err := b.Write(scratch[:8]); err != nil {
		return err
	}

	if _, err := b.Write(nodePub); err != nil {
		return err
	}

	// If we got a node announcement for this node, we will have the rest
	// of the data available. If not we don't have more data to write.
	if !node.HaveNodeAnnouncement {
		// Write HaveNodeAnnouncement=0.
		byteOrder.PutUint16(scratch[:2], 0)
		if _, err := b.Write(scratch[:2]); err != nil {
			return err
		}

		return nodeBucket.Put(nodePub, b.Bytes())
	}

	// Write HaveNodeAnnouncement=1.
	byteOrder.PutUint16(scratch[:2], 1)
	if _, err := b.Write(scratch[:2]); err != nil {
		return err
	}

	// RGB color, one byte per component.
	if err := binary.Write(&b, byteOrder, node.Color.R); err != nil {
		return err
	}
	if err := binary.Write(&b, byteOrder, node.Color.G); err != nil {
		return err
	}
	if err := binary.Write(&b, byteOrder, node.Color.B); err != nil {
		return err
	}

	if err := wire.WriteVarString(&b, 0, node.Alias); err != nil {
		return err
	}

	if err := node.Features.Encode(&b); err != nil {
		return err
	}

	// Address count followed by each serialized address.
	numAddresses := uint16(len(node.Addresses))
	byteOrder.PutUint16(scratch[:2], numAddresses)
	if _, err := b.Write(scratch[:2]); err != nil {
		return err
	}

	for _, address := range node.Addresses {
		if err := serializeAddr(&b, address); err != nil {
			return err
		}
	}

	// 80 is the cap enforced on read by deserializeLightningNode; reject
	// anything larger up front so we never write an unreadable record.
	sigLen := len(node.AuthSigBytes)
	if sigLen > 80 {
		return fmt.Errorf("max sig len allowed is 80, had %v",
			sigLen)
	}

	err = wire.WriteVarBytes(&b, 0, node.AuthSigBytes)
	if err != nil {
		return err
	}

	if len(node.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes {
		return ErrTooManyExtraOpaqueBytes(len(node.ExtraOpaqueData))
	}
	err = wire.WriteVarBytes(&b, 0, node.ExtraOpaqueData)
	if err != nil {
		return err
	}

	if err := aliasBucket.Put(nodePub, []byte(node.Alias)); err != nil {
		return err
	}

	// With the alias bucket updated, we'll now update the index that
	// tracks the time series of node updates.
	var indexKey [8 + 33]byte
	byteOrder.PutUint64(indexKey[:8], updateUnix)
	copy(indexKey[8:], nodePub)

	// If there was already an old index entry for this node, then we'll
	// delete the old one before we write the new entry.
	if nodeBytes := nodeBucket.Get(nodePub); nodeBytes != nil {
		// Extract out the old update time so we can reconstruct the
		// prior index key to delete it from the index.
		oldUpdateTime := nodeBytes[:8]

		var oldIndexKey [8 + 33]byte
		copy(oldIndexKey[:8], oldUpdateTime)
		copy(oldIndexKey[8:], nodePub)

		if err := updateIndex.Delete(oldIndexKey[:]); err != nil {
			return err
		}
	}

	if err := updateIndex.Put(indexKey[:], nil); err != nil {
		return err
	}

	return nodeBucket.Put(nodePub, b.Bytes())
}
  4149  
  4150  func fetchLightningNode(nodeBucket kvdb.RBucket,
  4151  	nodePub []byte) (LightningNode, error) {
  4152  
  4153  	nodeBytes := nodeBucket.Get(nodePub)
  4154  	if nodeBytes == nil {
  4155  		return LightningNode{}, ErrGraphNodeNotFound
  4156  	}
  4157  
  4158  	nodeReader := bytes.NewReader(nodeBytes)
  4159  	return deserializeLightningNode(nodeReader)
  4160  }
  4161  
  4162  func deserializeLightningNodeCacheable(r io.Reader) (*graphCacheNode, error) {
  4163  	// Always populate a feature vector, even if we don't have a node
  4164  	// announcement and short circuit below.
  4165  	node := newGraphCacheNode(
  4166  		route.Vertex{},
  4167  		lnwire.EmptyFeatureVector(),
  4168  	)
  4169  
  4170  	var nodeScratch [8]byte
  4171  
  4172  	// Skip ahead:
  4173  	// - LastUpdate (8 bytes)
  4174  	if _, err := r.Read(nodeScratch[:]); err != nil {
  4175  		return nil, err
  4176  	}
  4177  
  4178  	if _, err := io.ReadFull(r, node.pubKeyBytes[:]); err != nil {
  4179  		return nil, err
  4180  	}
  4181  
  4182  	// Read the node announcement flag.
  4183  	if _, err := r.Read(nodeScratch[:2]); err != nil {
  4184  		return nil, err
  4185  	}
  4186  	hasNodeAnn := byteOrder.Uint16(nodeScratch[:2])
  4187  
  4188  	// The rest of the data is optional, and will only be there if we got a
  4189  	// node announcement for this node.
  4190  	if hasNodeAnn == 0 {
  4191  		return node, nil
  4192  	}
  4193  
  4194  	// We did get a node announcement for this node, so we'll have the rest
  4195  	// of the data available.
  4196  	var rgb uint8
  4197  	if err := binary.Read(r, byteOrder, &rgb); err != nil {
  4198  		return nil, err
  4199  	}
  4200  	if err := binary.Read(r, byteOrder, &rgb); err != nil {
  4201  		return nil, err
  4202  	}
  4203  	if err := binary.Read(r, byteOrder, &rgb); err != nil {
  4204  		return nil, err
  4205  	}
  4206  
  4207  	if _, err := wire.ReadVarString(r, 0); err != nil {
  4208  		return nil, err
  4209  	}
  4210  
  4211  	if err := node.features.Decode(r); err != nil {
  4212  		return nil, err
  4213  	}
  4214  
  4215  	return node, nil
  4216  }
  4217  
  4218  func deserializeLightningNode(r io.Reader) (LightningNode, error) {
  4219  	var (
  4220  		node    LightningNode
  4221  		scratch [8]byte
  4222  		err     error
  4223  	)
  4224  
  4225  	// Always populate a feature vector, even if we don't have a node
  4226  	// announcement and short circuit below.
  4227  	node.Features = lnwire.EmptyFeatureVector()
  4228  
  4229  	if _, err := r.Read(scratch[:]); err != nil {
  4230  		return LightningNode{}, err
  4231  	}
  4232  
  4233  	unix := int64(byteOrder.Uint64(scratch[:]))
  4234  	node.LastUpdate = time.Unix(unix, 0)
  4235  
  4236  	if _, err := io.ReadFull(r, node.PubKeyBytes[:]); err != nil {
  4237  		return LightningNode{}, err
  4238  	}
  4239  
  4240  	if _, err := r.Read(scratch[:2]); err != nil {
  4241  		return LightningNode{}, err
  4242  	}
  4243  
  4244  	hasNodeAnn := byteOrder.Uint16(scratch[:2])
  4245  	if hasNodeAnn == 1 {
  4246  		node.HaveNodeAnnouncement = true
  4247  	} else {
  4248  		node.HaveNodeAnnouncement = false
  4249  	}
  4250  
  4251  	// The rest of the data is optional, and will only be there if we got a node
  4252  	// announcement for this node.
  4253  	if !node.HaveNodeAnnouncement {
  4254  		return node, nil
  4255  	}
  4256  
  4257  	// We did get a node announcement for this node, so we'll have the rest
  4258  	// of the data available.
  4259  	if err := binary.Read(r, byteOrder, &node.Color.R); err != nil {
  4260  		return LightningNode{}, err
  4261  	}
  4262  	if err := binary.Read(r, byteOrder, &node.Color.G); err != nil {
  4263  		return LightningNode{}, err
  4264  	}
  4265  	if err := binary.Read(r, byteOrder, &node.Color.B); err != nil {
  4266  		return LightningNode{}, err
  4267  	}
  4268  
  4269  	node.Alias, err = wire.ReadVarString(r, 0)
  4270  	if err != nil {
  4271  		return LightningNode{}, err
  4272  	}
  4273  
  4274  	err = node.Features.Decode(r)
  4275  	if err != nil {
  4276  		return LightningNode{}, err
  4277  	}
  4278  
  4279  	if _, err := r.Read(scratch[:2]); err != nil {
  4280  		return LightningNode{}, err
  4281  	}
  4282  	numAddresses := int(byteOrder.Uint16(scratch[:2]))
  4283  
  4284  	var addresses []net.Addr
  4285  	for i := 0; i < numAddresses; i++ {
  4286  		address, err := deserializeAddr(r)
  4287  		if err != nil {
  4288  			return LightningNode{}, err
  4289  		}
  4290  		addresses = append(addresses, address)
  4291  	}
  4292  	node.Addresses = addresses
  4293  
  4294  	node.AuthSigBytes, err = wire.ReadVarBytes(r, 0, 80, "sig")
  4295  	if err != nil {
  4296  		return LightningNode{}, err
  4297  	}
  4298  
  4299  	// We'll try and see if there are any opaque bytes left, if not, then
  4300  	// we'll ignore the EOF error and return the node as is.
  4301  	node.ExtraOpaqueData, err = wire.ReadVarBytes(
  4302  		r, 0, MaxAllowedExtraOpaqueBytes, "blob",
  4303  	)
  4304  	switch {
  4305  	case err == io.ErrUnexpectedEOF:
  4306  	case err == io.EOF:
  4307  	case err != nil:
  4308  		return LightningNode{}, err
  4309  	}
  4310  
  4311  	return node, nil
  4312  }
  4313  
  4314  func putChanEdgeInfo(edgeIndex kvdb.RwBucket, edgeInfo *ChannelEdgeInfo, chanID [8]byte) error {
  4315  	var b bytes.Buffer
  4316  
  4317  	if _, err := b.Write(edgeInfo.NodeKey1Bytes[:]); err != nil {
  4318  		return err
  4319  	}
  4320  	if _, err := b.Write(edgeInfo.NodeKey2Bytes[:]); err != nil {
  4321  		return err
  4322  	}
  4323  	if _, err := b.Write(edgeInfo.DecredKey1Bytes[:]); err != nil {
  4324  		return err
  4325  	}
  4326  	if _, err := b.Write(edgeInfo.DecredKey2Bytes[:]); err != nil {
  4327  		return err
  4328  	}
  4329  
  4330  	if err := wire.WriteVarBytes(&b, 0, edgeInfo.Features); err != nil {
  4331  		return err
  4332  	}
  4333  
  4334  	authProof := edgeInfo.AuthProof
  4335  	var nodeSig1, nodeSig2, decredSig1, decredSig2 []byte
  4336  	if authProof != nil {
  4337  		nodeSig1 = authProof.NodeSig1Bytes
  4338  		nodeSig2 = authProof.NodeSig2Bytes
  4339  		decredSig1 = authProof.DecredSig1Bytes
  4340  		decredSig2 = authProof.DecredSig2Bytes
  4341  	}
  4342  
  4343  	if err := wire.WriteVarBytes(&b, 0, nodeSig1); err != nil {
  4344  		return err
  4345  	}
  4346  	if err := wire.WriteVarBytes(&b, 0, nodeSig2); err != nil {
  4347  		return err
  4348  	}
  4349  	if err := wire.WriteVarBytes(&b, 0, decredSig1); err != nil {
  4350  		return err
  4351  	}
  4352  	if err := wire.WriteVarBytes(&b, 0, decredSig2); err != nil {
  4353  		return err
  4354  	}
  4355  
  4356  	if err := writeOutpoint(&b, &edgeInfo.ChannelPoint); err != nil {
  4357  		return err
  4358  	}
  4359  	if err := binary.Write(&b, byteOrder, uint64(edgeInfo.Capacity)); err != nil {
  4360  		return err
  4361  	}
  4362  	if _, err := b.Write(chanID[:]); err != nil {
  4363  		return err
  4364  	}
  4365  	if _, err := b.Write(edgeInfo.ChainHash[:]); err != nil {
  4366  		return err
  4367  	}
  4368  
  4369  	if len(edgeInfo.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes {
  4370  		return ErrTooManyExtraOpaqueBytes(len(edgeInfo.ExtraOpaqueData))
  4371  	}
  4372  	err := wire.WriteVarBytes(&b, 0, edgeInfo.ExtraOpaqueData)
  4373  	if err != nil {
  4374  		return err
  4375  	}
  4376  
  4377  	return edgeIndex.Put(chanID[:], b.Bytes())
  4378  }
  4379  
  4380  func fetchChanEdgeInfo(edgeIndex kvdb.RBucket,
  4381  	chanID []byte) (ChannelEdgeInfo, error) {
  4382  
  4383  	edgeInfoBytes := edgeIndex.Get(chanID)
  4384  	if edgeInfoBytes == nil {
  4385  		return ChannelEdgeInfo{}, ErrEdgeNotFound
  4386  	}
  4387  
  4388  	edgeInfoReader := bytes.NewReader(edgeInfoBytes)
  4389  	return deserializeChanEdgeInfo(edgeInfoReader)
  4390  }
  4391  
  4392  func deserializeChanEdgeInfo(r io.Reader) (ChannelEdgeInfo, error) {
  4393  	var (
  4394  		err      error
  4395  		edgeInfo ChannelEdgeInfo
  4396  	)
  4397  
  4398  	if _, err := io.ReadFull(r, edgeInfo.NodeKey1Bytes[:]); err != nil {
  4399  		return ChannelEdgeInfo{}, err
  4400  	}
  4401  	if _, err := io.ReadFull(r, edgeInfo.NodeKey2Bytes[:]); err != nil {
  4402  		return ChannelEdgeInfo{}, err
  4403  	}
  4404  	if _, err := io.ReadFull(r, edgeInfo.DecredKey1Bytes[:]); err != nil {
  4405  		return ChannelEdgeInfo{}, err
  4406  	}
  4407  	if _, err := io.ReadFull(r, edgeInfo.DecredKey2Bytes[:]); err != nil {
  4408  		return ChannelEdgeInfo{}, err
  4409  	}
  4410  
  4411  	edgeInfo.Features, err = wire.ReadVarBytes(r, 0, 900, "features")
  4412  	if err != nil {
  4413  		return ChannelEdgeInfo{}, err
  4414  	}
  4415  
  4416  	proof := &ChannelAuthProof{}
  4417  
  4418  	proof.NodeSig1Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
  4419  	if err != nil {
  4420  		return ChannelEdgeInfo{}, err
  4421  	}
  4422  	proof.NodeSig2Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
  4423  	if err != nil {
  4424  		return ChannelEdgeInfo{}, err
  4425  	}
  4426  	proof.DecredSig1Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
  4427  	if err != nil {
  4428  		return ChannelEdgeInfo{}, err
  4429  	}
  4430  	proof.DecredSig2Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
  4431  	if err != nil {
  4432  		return ChannelEdgeInfo{}, err
  4433  	}
  4434  
  4435  	if !proof.IsEmpty() {
  4436  		edgeInfo.AuthProof = proof
  4437  	}
  4438  
  4439  	edgeInfo.ChannelPoint = wire.OutPoint{}
  4440  	if err := readOutpoint(r, &edgeInfo.ChannelPoint); err != nil {
  4441  		return ChannelEdgeInfo{}, err
  4442  	}
  4443  	if err := binary.Read(r, byteOrder, &edgeInfo.Capacity); err != nil {
  4444  		return ChannelEdgeInfo{}, err
  4445  	}
  4446  	if err := binary.Read(r, byteOrder, &edgeInfo.ChannelID); err != nil {
  4447  		return ChannelEdgeInfo{}, err
  4448  	}
  4449  
  4450  	if _, err := io.ReadFull(r, edgeInfo.ChainHash[:]); err != nil {
  4451  		return ChannelEdgeInfo{}, err
  4452  	}
  4453  
  4454  	// We'll try and see if there are any opaque bytes left, if not, then
  4455  	// we'll ignore the EOF error and return the edge as is.
  4456  	edgeInfo.ExtraOpaqueData, err = wire.ReadVarBytes(
  4457  		r, 0, MaxAllowedExtraOpaqueBytes, "blob",
  4458  	)
  4459  	switch {
  4460  	case err == io.ErrUnexpectedEOF:
  4461  	case err == io.EOF:
  4462  	case err != nil:
  4463  		return ChannelEdgeInfo{}, err
  4464  	}
  4465  
  4466  	return edgeInfo, nil
  4467  }
  4468  
  4469  func putChanEdgePolicy(edges, nodes kvdb.RwBucket, edge *ChannelEdgePolicy,
  4470  	from, to []byte) error {
  4471  
  4472  	var edgeKey [33 + 8]byte
  4473  	copy(edgeKey[:], from)
  4474  	byteOrder.PutUint64(edgeKey[33:], edge.ChannelID)
  4475  
  4476  	var b bytes.Buffer
  4477  	if err := serializeChanEdgePolicy(&b, edge, to); err != nil {
  4478  		return err
  4479  	}
  4480  
  4481  	// Before we write out the new edge, we'll create a new entry in the
  4482  	// update index in order to keep it fresh.
  4483  	updateUnix := uint64(edge.LastUpdate.Unix())
  4484  	var indexKey [8 + 8]byte
  4485  	byteOrder.PutUint64(indexKey[:8], updateUnix)
  4486  	byteOrder.PutUint64(indexKey[8:], edge.ChannelID)
  4487  
  4488  	updateIndex, err := edges.CreateBucketIfNotExists(edgeUpdateIndexBucket)
  4489  	if err != nil {
  4490  		return err
  4491  	}
  4492  
  4493  	// If there was already an entry for this edge, then we'll need to
  4494  	// delete the old one to ensure we don't leave around any after-images.
  4495  	// An unknown policy value does not have a update time recorded, so
  4496  	// it also does not need to be removed.
  4497  	if edgeBytes := edges.Get(edgeKey[:]); edgeBytes != nil &&
  4498  		!bytes.Equal(edgeBytes, unknownPolicy) {
  4499  
  4500  		// In order to delete the old entry, we'll need to obtain the
  4501  		// *prior* update time in order to delete it. To do this, we'll
  4502  		// need to deserialize the existing policy within the database
  4503  		// (now outdated by the new one), and delete its corresponding
  4504  		// entry within the update index. We'll ignore any
  4505  		// ErrEdgePolicyOptionalFieldNotFound error, as we only need
  4506  		// the channel ID and update time to delete the entry.
  4507  		// TODO(halseth): get rid of these invalid policies in a
  4508  		// migration.
  4509  		oldEdgePolicy, err := deserializeChanEdgePolicy(
  4510  			bytes.NewReader(edgeBytes), nodes,
  4511  		)
  4512  		if err != nil && err != ErrEdgePolicyOptionalFieldNotFound {
  4513  			return err
  4514  		}
  4515  
  4516  		oldUpdateTime := uint64(oldEdgePolicy.LastUpdate.Unix())
  4517  
  4518  		var oldIndexKey [8 + 8]byte
  4519  		byteOrder.PutUint64(oldIndexKey[:8], oldUpdateTime)
  4520  		byteOrder.PutUint64(oldIndexKey[8:], edge.ChannelID)
  4521  
  4522  		if err := updateIndex.Delete(oldIndexKey[:]); err != nil {
  4523  			return err
  4524  		}
  4525  	}
  4526  
  4527  	if err := updateIndex.Put(indexKey[:], nil); err != nil {
  4528  		return err
  4529  	}
  4530  
  4531  	updateEdgePolicyDisabledIndex(
  4532  		edges, edge.ChannelID,
  4533  		edge.ChannelFlags&lnwire.ChanUpdateDirection > 0,
  4534  		edge.IsDisabled(),
  4535  	)
  4536  
  4537  	return edges.Put(edgeKey[:], b.Bytes())
  4538  }
  4539  
  4540  // updateEdgePolicyDisabledIndex is used to update the disabledEdgePolicyIndex
  4541  // bucket by either add a new disabled ChannelEdgePolicy or remove an existing
  4542  // one.
  4543  // The direction represents the direction of the edge and disabled is used for
  4544  // deciding whether to remove or add an entry to the bucket.
  4545  // In general a channel is disabled if two entries for the same chanID exist
  4546  // in this bucket.
  4547  // Maintaining the bucket this way allows a fast retrieval of disabled
  4548  // channels, for example when prune is needed.
  4549  func updateEdgePolicyDisabledIndex(edges kvdb.RwBucket, chanID uint64,
  4550  	direction bool, disabled bool) error {
  4551  
  4552  	var disabledEdgeKey [8 + 1]byte
  4553  	byteOrder.PutUint64(disabledEdgeKey[0:], chanID)
  4554  	if direction {
  4555  		disabledEdgeKey[8] = 1
  4556  	}
  4557  
  4558  	disabledEdgePolicyIndex, err := edges.CreateBucketIfNotExists(
  4559  		disabledEdgePolicyBucket,
  4560  	)
  4561  	if err != nil {
  4562  		return err
  4563  	}
  4564  
  4565  	if disabled {
  4566  		return disabledEdgePolicyIndex.Put(disabledEdgeKey[:], []byte{})
  4567  	}
  4568  
  4569  	return disabledEdgePolicyIndex.Delete(disabledEdgeKey[:])
  4570  }
  4571  
  4572  // putChanEdgePolicyUnknown marks the edge policy as unknown
  4573  // in the edges bucket.
  4574  func putChanEdgePolicyUnknown(edges kvdb.RwBucket, channelID uint64,
  4575  	from []byte) error {
  4576  
  4577  	var edgeKey [33 + 8]byte
  4578  	copy(edgeKey[:], from)
  4579  	byteOrder.PutUint64(edgeKey[33:], channelID)
  4580  
  4581  	if edges.Get(edgeKey[:]) != nil {
  4582  		return fmt.Errorf("cannot write unknown policy for channel %v "+
  4583  			" when there is already a policy present", channelID)
  4584  	}
  4585  
  4586  	return edges.Put(edgeKey[:], unknownPolicy)
  4587  }
  4588  
  4589  func fetchChanEdgePolicy(edges kvdb.RBucket, chanID []byte,
  4590  	nodePub []byte, nodes kvdb.RBucket) (*ChannelEdgePolicy, error) {
  4591  
  4592  	var edgeKey [33 + 8]byte
  4593  	copy(edgeKey[:], nodePub)
  4594  	copy(edgeKey[33:], chanID)
  4595  
  4596  	edgeBytes := edges.Get(edgeKey[:])
  4597  	if edgeBytes == nil {
  4598  		return nil, ErrEdgeNotFound
  4599  	}
  4600  
  4601  	// No need to deserialize unknown policy.
  4602  	if bytes.Equal(edgeBytes, unknownPolicy) {
  4603  		return nil, nil
  4604  	}
  4605  
  4606  	edgeReader := bytes.NewReader(edgeBytes)
  4607  
  4608  	ep, err := deserializeChanEdgePolicy(edgeReader, nodes)
  4609  	switch {
  4610  	// If the db policy was missing an expected optional field, we return
  4611  	// nil as if the policy was unknown.
  4612  	case err == ErrEdgePolicyOptionalFieldNotFound:
  4613  		return nil, nil
  4614  
  4615  	case err != nil:
  4616  		return nil, err
  4617  	}
  4618  
  4619  	return ep, nil
  4620  }
  4621  
  4622  func fetchChanEdgePolicies(edgeIndex kvdb.RBucket, edges kvdb.RBucket,
  4623  	nodes kvdb.RBucket, chanID []byte,
  4624  	db kvdb.Backend) (*ChannelEdgePolicy, *ChannelEdgePolicy, error) {
  4625  
  4626  	edgeInfo := edgeIndex.Get(chanID)
  4627  	if edgeInfo == nil {
  4628  		return nil, nil, ErrEdgeNotFound
  4629  	}
  4630  
  4631  	// The first node is contained within the first half of the edge
  4632  	// information. We only propagate the error here and below if it's
  4633  	// something other than edge non-existence.
  4634  	node1Pub := edgeInfo[:33]
  4635  	edge1, err := fetchChanEdgePolicy(edges, chanID, node1Pub, nodes)
  4636  	if err != nil {
  4637  		return nil, nil, err
  4638  	}
  4639  
  4640  	// As we may have a single direction of the edge but not the other,
  4641  	// only fill in the database pointers if the edge is found.
  4642  	if edge1 != nil {
  4643  		edge1.db = db
  4644  		edge1.Node.db = db
  4645  	}
  4646  
  4647  	// Similarly, the second node is contained within the latter
  4648  	// half of the edge information.
  4649  	node2Pub := edgeInfo[33:66]
  4650  	edge2, err := fetchChanEdgePolicy(edges, chanID, node2Pub, nodes)
  4651  	if err != nil {
  4652  		return nil, nil, err
  4653  	}
  4654  
  4655  	if edge2 != nil {
  4656  		edge2.db = db
  4657  		edge2.Node.db = db
  4658  	}
  4659  
  4660  	return edge1, edge2, nil
  4661  }
  4662  
  4663  func serializeChanEdgePolicy(w io.Writer, edge *ChannelEdgePolicy,
  4664  	to []byte) error {
  4665  
  4666  	err := wire.WriteVarBytes(w, 0, edge.SigBytes)
  4667  	if err != nil {
  4668  		return err
  4669  	}
  4670  
  4671  	if err := binary.Write(w, byteOrder, edge.ChannelID); err != nil {
  4672  		return err
  4673  	}
  4674  
  4675  	var scratch [8]byte
  4676  	updateUnix := uint64(edge.LastUpdate.Unix())
  4677  	byteOrder.PutUint64(scratch[:], updateUnix)
  4678  	if _, err := w.Write(scratch[:]); err != nil {
  4679  		return err
  4680  	}
  4681  
  4682  	if err := binary.Write(w, byteOrder, edge.MessageFlags); err != nil {
  4683  		return err
  4684  	}
  4685  	if err := binary.Write(w, byteOrder, edge.ChannelFlags); err != nil {
  4686  		return err
  4687  	}
  4688  	if err := binary.Write(w, byteOrder, edge.TimeLockDelta); err != nil {
  4689  		return err
  4690  	}
  4691  	if err := binary.Write(w, byteOrder, uint64(edge.MinHTLC)); err != nil {
  4692  		return err
  4693  	}
  4694  	if err := binary.Write(w, byteOrder, uint64(edge.FeeBaseMAtoms)); err != nil {
  4695  		return err
  4696  	}
  4697  	if err := binary.Write(w, byteOrder, uint64(edge.FeeProportionalMillionths)); err != nil {
  4698  		return err
  4699  	}
  4700  
  4701  	if _, err := w.Write(to); err != nil {
  4702  		return err
  4703  	}
  4704  
  4705  	// If the max_htlc field is present, we write it. To be compatible with
  4706  	// older versions that wasn't aware of this field, we write it as part
  4707  	// of the opaque data.
  4708  	// TODO(halseth): clean up when moving to TLV.
  4709  	var opaqueBuf bytes.Buffer
  4710  	if edge.MessageFlags.HasMaxHtlc() {
  4711  		err := binary.Write(&opaqueBuf, byteOrder, uint64(edge.MaxHTLC))
  4712  		if err != nil {
  4713  			return err
  4714  		}
  4715  	}
  4716  
  4717  	if len(edge.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes {
  4718  		return ErrTooManyExtraOpaqueBytes(len(edge.ExtraOpaqueData))
  4719  	}
  4720  	if _, err := opaqueBuf.Write(edge.ExtraOpaqueData); err != nil {
  4721  		return err
  4722  	}
  4723  
  4724  	if err := wire.WriteVarBytes(w, 0, opaqueBuf.Bytes()); err != nil {
  4725  		return err
  4726  	}
  4727  	return nil
  4728  }
  4729  
  4730  func deserializeChanEdgePolicy(r io.Reader,
  4731  	nodes kvdb.RBucket) (*ChannelEdgePolicy, error) {
  4732  
  4733  	// Deserialize the policy. Note that in case an optional field is not
  4734  	// found, both an error and a populated policy object are returned.
  4735  	edge, deserializeErr := deserializeChanEdgePolicyRaw(r)
  4736  	if deserializeErr != nil &&
  4737  		deserializeErr != ErrEdgePolicyOptionalFieldNotFound {
  4738  
  4739  		return nil, deserializeErr
  4740  	}
  4741  
  4742  	// Populate full LightningNode struct.
  4743  	pub := edge.Node.PubKeyBytes[:]
  4744  	node, err := fetchLightningNode(nodes, pub)
  4745  	if err != nil {
  4746  		return nil, fmt.Errorf("unable to fetch node: %x, %v", pub, err)
  4747  	}
  4748  	edge.Node = &node
  4749  
  4750  	return edge, deserializeErr
  4751  }
  4752  
  4753  func deserializeChanEdgePolicyRaw(r io.Reader) (*ChannelEdgePolicy, error) {
  4754  	edge := &ChannelEdgePolicy{}
  4755  
  4756  	var err error
  4757  	edge.SigBytes, err = wire.ReadVarBytes(r, 0, 80, "sig")
  4758  	if err != nil {
  4759  		return nil, err
  4760  	}
  4761  
  4762  	if err := binary.Read(r, byteOrder, &edge.ChannelID); err != nil {
  4763  		return nil, err
  4764  	}
  4765  
  4766  	var scratch [8]byte
  4767  	if _, err := r.Read(scratch[:]); err != nil {
  4768  		return nil, err
  4769  	}
  4770  	unix := int64(byteOrder.Uint64(scratch[:]))
  4771  	edge.LastUpdate = time.Unix(unix, 0)
  4772  
  4773  	if err := binary.Read(r, byteOrder, &edge.MessageFlags); err != nil {
  4774  		return nil, err
  4775  	}
  4776  	if err := binary.Read(r, byteOrder, &edge.ChannelFlags); err != nil {
  4777  		return nil, err
  4778  	}
  4779  	if err := binary.Read(r, byteOrder, &edge.TimeLockDelta); err != nil {
  4780  		return nil, err
  4781  	}
  4782  
  4783  	var n uint64
  4784  	if err := binary.Read(r, byteOrder, &n); err != nil {
  4785  		return nil, err
  4786  	}
  4787  	edge.MinHTLC = lnwire.MilliAtom(n)
  4788  
  4789  	if err := binary.Read(r, byteOrder, &n); err != nil {
  4790  		return nil, err
  4791  	}
  4792  	edge.FeeBaseMAtoms = lnwire.MilliAtom(n)
  4793  
  4794  	if err := binary.Read(r, byteOrder, &n); err != nil {
  4795  		return nil, err
  4796  	}
  4797  	edge.FeeProportionalMillionths = lnwire.MilliAtom(n)
  4798  
  4799  	var pub [33]byte
  4800  	if _, err := r.Read(pub[:]); err != nil {
  4801  		return nil, err
  4802  	}
  4803  	edge.Node = &LightningNode{
  4804  		PubKeyBytes: pub,
  4805  	}
  4806  
  4807  	// We'll try and see if there are any opaque bytes left, if not, then
  4808  	// we'll ignore the EOF error and return the edge as is.
  4809  	edge.ExtraOpaqueData, err = wire.ReadVarBytes(
  4810  		r, 0, MaxAllowedExtraOpaqueBytes, "blob",
  4811  	)
  4812  	switch {
  4813  	case err == io.ErrUnexpectedEOF:
  4814  	case err == io.EOF:
  4815  	case err != nil:
  4816  		return nil, err
  4817  	}
  4818  
  4819  	// See if optional fields are present.
  4820  	if edge.MessageFlags.HasMaxHtlc() {
  4821  		// The max_htlc field should be at the beginning of the opaque
  4822  		// bytes.
  4823  		opq := edge.ExtraOpaqueData
  4824  
  4825  		// If the max_htlc field is not present, it might be old data
  4826  		// stored before this field was validated. We'll return the
  4827  		// edge along with an error.
  4828  		if len(opq) < 8 {
  4829  			return edge, ErrEdgePolicyOptionalFieldNotFound
  4830  		}
  4831  
  4832  		maxHtlc := byteOrder.Uint64(opq[:8])
  4833  		edge.MaxHTLC = lnwire.MilliAtom(maxHtlc)
  4834  
  4835  		// Exclude the parsed field from the rest of the opaque data.
  4836  		edge.ExtraOpaqueData = opq[8:]
  4837  	}
  4838  
  4839  	return edge, nil
  4840  }