github.com/decred/dcrlnd@v0.7.6/channeldb/channel.go

     1  package channeldb
     2  
     3  import (
     4  	"bytes"
     5  	"crypto/sha256"
     6  	"encoding/binary"
     7  	"errors"
     8  	"fmt"
     9  	"io"
    10  	"net"
    11  	"strconv"
    12  	"strings"
    13  	"sync"
    14  
    15  	"github.com/decred/dcrd/chaincfg/chainhash"
    16  	"github.com/decred/dcrd/dcrec/secp256k1/v4"
    17  	"github.com/decred/dcrd/dcrutil/v4"
    18  	"github.com/decred/dcrd/wire"
    19  	"github.com/decred/dcrlnd/input"
    20  	"github.com/decred/dcrlnd/keychain"
    21  	"github.com/decred/dcrlnd/kvdb"
    22  	"github.com/decred/dcrlnd/lnwire"
    23  	"github.com/decred/dcrlnd/shachain"
    24  	"github.com/decred/dcrlnd/tlv"
    25  )
    26  
    27  const (
    28  	// AbsoluteThawHeightThreshold is the threshold at which a thaw height
    29  	// begins to be interpreted as an absolute block height, rather than a
    30  	// relative one.
    31  	AbsoluteThawHeightThreshold uint32 = 500000
    32  )
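
        // For example, a thaw height of 144 is below this threshold and is
        // interpreted as a relative height, while a thaw height of 650,000
        // is at or above it and is interpreted as an absolute block height.
        // See the ThawHeight field of OpenChannel for how this value is
        // used.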
    33  
    34  var (
    35  	// closedChannelBucket stores summarization information concerning
    36  	// previously open, but now closed channels.
    37  	closedChannelBucket = []byte("closed-chan-bucket")
    38  
    39  	// openChannelBucket stores all the currently open channels. This bucket
    40  	// has a second, nested bucket which is keyed by a node's ID. Within
    41  	// that node ID bucket, all attributes required to track, update, and
    42  	// close a channel are stored.
    43  	//
    44  	// openChan -> nodeID -> chainHash -> chanPoint
    45  	//
    46  	// TODO(roasbeef): flesh out comment
    47  	openChannelBucket = []byte("open-chan-bucket")
    48  
    49  	// outpointBucket stores all of our channel outpoints and a tlv
    50  	// stream containing channel data.
    51  	//
    52  	// outpoint -> tlv stream
    53  	//
    54  	outpointBucket = []byte("outpoint-bucket")
    55  
    56  	// historicalChannelBucket stores all channels that have seen their
    57  	// commitment tx confirm. All information from their previous open state
    58  	// is retained.
    59  	historicalChannelBucket = []byte("historical-chan-bucket")
    60  
    61  	// chanInfoKey can be accessed within the bucket for a channel
    62  	// (identified by its chanPoint). This key stores all the static
    63  	// information for a channel which is decided at the end of the
    64  	// funding flow.
    65  	chanInfoKey = []byte("chan-info-key")
    66  
    67  	// localUpfrontShutdownKey can be accessed within the bucket for a channel
    68  	// (identified by its chanPoint). This key stores an optional upfront
    69  	// shutdown script for the local peer.
    70  	localUpfrontShutdownKey = []byte("local-upfront-shutdown-key")
    71  
    72  	// remoteUpfrontShutdownKey can be accessed within the bucket for a channel
    73  	// (identified by its chanPoint). This key stores an optional upfront
    74  	// shutdown script for the remote peer.
    75  	remoteUpfrontShutdownKey = []byte("remote-upfront-shutdown-key")
    76  
    77  	// chanCommitmentKey can be accessed within the sub-bucket for a
    78  	// particular channel. This key stores the up to date commitment state
    79  	// for a particular channel party. Appending a 0 to the end of this key
    80  	// indicates it's the commitment for the local party, and appending a 1
    81  	// to the end of this key indicates it's the commitment for the remote
    82  	// party.
    83  	chanCommitmentKey = []byte("chan-commitment-key")
    84  
    85  	// unsignedAckedUpdatesKey is an entry in the channel bucket that
    86  	// contains the remote updates that we have acked, but not yet signed
    87  	// for in one of our remote commits.
    88  	unsignedAckedUpdatesKey = []byte("unsigned-acked-updates-key")
    89  
    90  	// remoteUnsignedLocalUpdatesKey is an entry in the channel bucket that
    91  	// contains the local updates that the remote party has acked, but
    92  	// has not yet signed for in one of their local commits.
    93  	remoteUnsignedLocalUpdatesKey = []byte("remote-unsigned-local-updates-key")
    94  
    95  	// revocationStateKey stores their current revocation hash, our
    96  	// preimage producer and their preimage store.
    97  	revocationStateKey = []byte("revocation-state-key")
    98  
    99  	// dataLossCommitPointKey stores the commitment point received from the
   100  	// remote peer during a channel sync in case we have lost channel state.
   101  	dataLossCommitPointKey = []byte("data-loss-commit-point-key")
   102  
   103  	// forceCloseTxKey points to the unilateral closing tx that we
   104  	// broadcasted when moving the channel to state CommitBroadcasted.
   105  	forceCloseTxKey = []byte("closing-tx-key")
   106  
   107  	// coopCloseTxKey points to the cooperative closing tx that we
   108  	// broadcasted when moving the channel to state CoopBroadcasted.
   109  	coopCloseTxKey = []byte("coop-closing-tx-key")
   110  
   111  	// commitDiffKey stores the current pending commitment state we've
   112  	// extended to the remote party (if any). Each time we propose a new
   113  	// state, we store the information necessary to reconstruct this state
   114  	// from the prior commitment. This allows us to resync the remote party
   115  	// to their expected state in the case of message loss.
   116  	//
   117  	// TODO(roasbeef): rename to commit chain?
   118  	commitDiffKey = []byte("commit-diff-key")
   119  
   120  	// revocationLogBucket is dedicated for storing the necessary delta
   121  	// state between channel updates required to re-construct a past state
   122  	// in order to punish a counterparty attempting a non-cooperative
   123  	// channel closure. This key should be accessed from within the
   124  	// sub-bucket of a target channel, identified by its channel point.
   125  	revocationLogBucket = []byte("revocation-log-key")
   126  
   127  	// frozenChanKey is the key where we store the information for any
   128  	// active "frozen" channels. This key is present only in the leaf
   129  	// bucket for a given channel.
   130  	frozenChanKey = []byte("frozen-chans")
   131  
   132  	// lastWasRevokeKey is a key that stores true when the last update we sent
   133  	// was a revocation and false when it was a commitment signature. This is
   134  	// nil in the case of new channels with no updates exchanged.
   135  	lastWasRevokeKey = []byte("last-was-revoke")
   136  )
   137  
   138  var (
   139  	// ErrNoCommitmentsFound is returned when a channel has not set
   140  	// commitment states.
   141  	ErrNoCommitmentsFound = fmt.Errorf("no commitments found")
   142  
   143  	// ErrNoChanInfoFound is returned when a particular channel does not
   144  	// have any channel state.
   145  	ErrNoChanInfoFound = fmt.Errorf("no chan info found")
   146  
   147  	// ErrNoRevocationsFound is returned when revocation state for a
   148  	// particular channel cannot be found.
   149  	ErrNoRevocationsFound = fmt.Errorf("no revocations found")
   150  
   151  	// ErrNoPendingCommit is returned when there is not a pending
   152  	// commitment for a remote party. A new commitment is written to disk
   153  	// each time we write a new state in order to be properly fault
   154  	// tolerant.
   155  	ErrNoPendingCommit = fmt.Errorf("no pending commits found")
   156  
   157  	// ErrInvalidCircuitKeyLen signals that a circuit key could not be
   158  	// decoded because the byte slice is of an invalid length.
   159  	ErrInvalidCircuitKeyLen = fmt.Errorf(
   160  		"length of serialized circuit key must be 16 bytes")
   161  
   162  	// ErrNoCommitPoint is returned when no data loss commit point is found
   163  	// in the database.
   164  	ErrNoCommitPoint = fmt.Errorf("no commit point found")
   165  
   166  	// ErrNoCloseTx is returned when no closing tx is found for a channel
   167  	// in the state CommitBroadcasted.
   168  	ErrNoCloseTx = fmt.Errorf("no closing tx found")
   169  
   170  	// ErrNoRestoredChannelMutation is returned when a caller attempts to
   171  	// mutate a channel that's been recovered.
   172  	ErrNoRestoredChannelMutation = fmt.Errorf("cannot mutate restored " +
   173  		"channel state")
   174  
   175  	// ErrChanBorked is returned when a caller attempts to mutate a borked
   176  	// channel.
   177  	ErrChanBorked = fmt.Errorf("cannot mutate borked channel")
   178  
   179  	// ErrLogEntryNotFound is returned when we cannot find a log entry at
   180  	// the height requested in the revocation log.
   181  	ErrLogEntryNotFound = fmt.Errorf("log entry not found")
   182  
   183  	// ErrMissingIndexEntry is returned when a caller attempts to close a
   184  	// channel and the outpoint is missing from the index.
   185  	ErrMissingIndexEntry = fmt.Errorf("missing outpoint from index")
   186  
   187  	// errHeightNotReached is returned when a query is made for channel
   188  	// balances at a height that we have not yet reached.
   189  	errHeightNotReached = fmt.Errorf("height requested greater than " +
   190  		"current commit height")
   191  )
   192  
   193  const (
   194  	// A tlv type definition used to serialize an outpoint's indexStatus
   195  	// for use in the outpoint index.
   196  	indexStatusType tlv.Type = 0
   197  
   198  	// A tlv type definition used to serialize and deserialize a KeyLocator
   199  	// from the database.
   200  	keyLocType tlv.Type = 1
   201  )
   202  
   203  // indexStatus is an enum-like type that describes what state the
   204  // outpoint is in. Currently there are only two possible values.
   205  type indexStatus uint8
   206  
   207  const (
   208  	// outpointOpen represents an outpoint that is open in the outpoint index.
   209  	outpointOpen indexStatus = 0
   210  
   211  	// outpointClosed represents an outpoint that is closed in the outpoint
   212  	// index.
   213  	outpointClosed indexStatus = 1
   214  )
   215  
   216  // ChannelType is an enum-like type that describes one of several possible
   217  // channel types. Each open channel is associated with a particular type as the
   218  // channel type may determine how higher level operations are conducted such as
   219  // fee negotiation, channel closing, the format of HTLCs, etc. Structure-wise,
   220  // a ChannelType is a bit field, with each bit denoting a modification from the
   221  // base channel type of single funder.
   222  type ChannelType uint8
   223  
   224  const (
   225  	// NOTE: iota isn't used here, as this enum needs to be stable
   226  	// long-term as it will be persisted to the database.
   227  
   228  	// SingleFunderBit represents a channel wherein one party solely funds
   229  	// the entire capacity of the channel.
   230  	SingleFunderBit ChannelType = 0
   231  
   232  	// DualFunderBit represents a channel wherein both parties contribute
   233  	// funds towards the total capacity of the channel. The channel may be
   234  	// funded symmetrically or asymmetrically.
   235  	DualFunderBit ChannelType = 1 << 0
   236  
   237  	// SingleFunderTweaklessBit is similar to the basic SingleFunder channel
   238  	// type, but it omits the tweak for one's key in the commitment
   239  	// transaction of the remote party.
   240  	SingleFunderTweaklessBit ChannelType = 1 << 1
   241  
   242  	// NoFundingTxBit denotes that we do not have the funding transaction
   243  	// stored locally on disk. This bit may be set if the funding
   244  	// transaction was crafted by a wallet external to the primary daemon.
   245  	NoFundingTxBit ChannelType = 1 << 2
   246  
   247  	// AnchorOutputsBit indicates that the channel makes use of anchor
   248  	// outputs to bump the commitment transaction's effective feerate. This
   249  	// channel type also uses a delayed to_remote output script.
   250  	AnchorOutputsBit ChannelType = 1 << 3
   251  
   252  	// FrozenBit indicates that the channel is a frozen channel, meaning
   253  	// that only the responder can decide to cooperatively close the
   254  	// channel.
   255  	FrozenBit ChannelType = 1 << 4
   256  
   257  	// ZeroHtlcTxFeeBit indicates that the channel should use zero-fee
   258  	// second-level HTLC transactions.
   259  	ZeroHtlcTxFeeBit ChannelType = 1 << 5
   260  
   261  	// LeaseExpirationBit indicates that the channel has been leased for a
   262  	// period of time, constraining every output that pays to the channel
   263  	// initiator with an additional CLTV of the lease maturity.
   264  	LeaseExpirationBit ChannelType = 1 << 6
   265  )
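
        // As an example of how these bits compose, a tweakless single-funder
        // channel that also uses anchor outputs has the type
        // SingleFunderTweaklessBit|AnchorOutputsBit. For such a channel, the
        // IsSingleFunder, IsTweakless and HasAnchors methods defined below
        // all return true.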
   266  
   267  // IsSingleFunder returns true if the channel type is one of the known single
   268  // funder variants.
   269  func (c ChannelType) IsSingleFunder() bool {
   270  	return c&DualFunderBit == 0
   271  }
   272  
   273  // IsDualFunder returns true if the ChannelType has the DualFunderBit set.
   274  func (c ChannelType) IsDualFunder() bool {
   275  	return c&DualFunderBit == DualFunderBit
   276  }
   277  
   278  // IsTweakless returns true if the target channel uses a commitment that
   279  // doesn't tweak the key for the remote party.
   280  func (c ChannelType) IsTweakless() bool {
   281  	return c&SingleFunderTweaklessBit == SingleFunderTweaklessBit
   282  }
   283  
   284  // HasFundingTx returns true if this channel type is one that has a funding
   285  // transaction stored locally.
   286  func (c ChannelType) HasFundingTx() bool {
   287  	return c&NoFundingTxBit == 0
   288  }
   289  
   290  // HasAnchors returns true if this channel type has anchor outputs on its
   291  // commitment.
   292  func (c ChannelType) HasAnchors() bool {
   293  	return c&AnchorOutputsBit == AnchorOutputsBit
   294  }
   295  
   296  // ZeroHtlcTxFee returns true if this channel type uses second-level HTLC
   297  // transactions signed with a zero fee.
   298  func (c ChannelType) ZeroHtlcTxFee() bool {
   299  	return c&ZeroHtlcTxFeeBit == ZeroHtlcTxFeeBit
   300  }
   301  
   302  // IsFrozen returns true if the channel is considered to be "frozen". A frozen
   303  // channel means that only the responder can initiate a cooperative channel
   304  // closure.
   305  func (c ChannelType) IsFrozen() bool {
   306  	return c&FrozenBit == FrozenBit
   307  }
   308  
   309  // HasLeaseExpiration returns true if the channel originated from a lease.
   310  func (c ChannelType) HasLeaseExpiration() bool {
   311  	return c&LeaseExpirationBit == LeaseExpirationBit
   312  }
   313  
   314  // ChannelConstraints represents a set of constraints meant to allow a node to
   315  // limit their exposure, enact flow control and ensure that all HTLCs are
   316  // economically relevant. This struct will be mirrored for both sides of the
   317  // channel, as each side will enforce various constraints that MUST be adhered
   318  // to for the lifetime of the channel. The parameters for each of these
   319  // constraints are static for the duration of the channel, meaning the channel
   320  // must be torn down for them to change.
   321  type ChannelConstraints struct {
   322  	// DustLimit is the threshold (in atoms) below which any outputs
   323  	// should be trimmed. When an output is trimmed, it isn't materialized
   324  	// as an actual output, but is instead burned to miner's fees.
   325  	DustLimit dcrutil.Amount
   326  
   327  	// ChanReserve is an absolute reservation on the channel for the
   328  	// owner of this set of constraints. This means that the current
   329  	// settled balance for this node CANNOT dip below the reservation
   330  	// amount. This acts as a defense against costless attacks when
   331  	// either side no longer has any skin in the game.
   332  	ChanReserve dcrutil.Amount
   333  
   334  	// MaxPendingAmount is the maximum pending HTLC value that the
   335  	// owner of these constraints can offer the remote node at a
   336  	// particular time.
   337  	MaxPendingAmount lnwire.MilliAtom
   338  
   339  	// MinHTLC is the minimum HTLC value that the owner of these
   340  	// constraints can offer the remote node. If any HTLCs below this
   341  	// amount are offered, then the HTLC will be rejected. This, in
   342  	// tandem with the dust limit, allows a node to regulate the
   343  	// smallest HTLC that it deems economically relevant.
   344  	MinHTLC lnwire.MilliAtom
   345  
   346  	// MaxAcceptedHtlcs is the maximum number of HTLCs that the owner of
   347  	// this set of constraints can offer the remote node. This allows each
   348  	// node to limit their overall exposure to HTLCs that may need to be
   349  	// acted upon in the case of a unilateral channel closure or a contract
   350  	// breach.
   351  	MaxAcceptedHtlcs uint16
   352  
   353  	// CsvDelay is the relative time lock delay expressed in blocks. Any
   354  	// settled outputs that pay to the owner of this channel configuration
   355  	// MUST ensure that the delay branch uses this value as the relative
   356  	// time lock. Similarly, any HTLC's offered by this node should use
   357  	// this value as well.
   358  	CsvDelay uint16
   359  }
   360  
   361  // ChannelConfig is a struct that houses the various configuration options for
   362  // channels. Each side maintains an instance of this configuration as it
   363  // governs: how the funding and commitment transactions are to be created, the
   364  // nature of HTLC's allotted, the keys to be used for delivery, and relative
   365  // time lock parameters.
   366  type ChannelConfig struct {
   367  	// ChannelConstraints is the set of constraints that must be upheld for
   368  	// the duration of the channel for the owner of this channel
   369  	// configuration. Constraints govern a number of flow control related
   370  	// parameters, including the smallest HTLC that will be accepted
   371  	// by a participant.
   372  	ChannelConstraints
   373  
   374  	// MultiSigKey is the key to be used within the 2-of-2 output script
   375  	// for the owner of this channel config.
   376  	MultiSigKey keychain.KeyDescriptor
   377  
   378  	// RevocationBasePoint is the base public key to be used when deriving
   379  	// revocation keys for the remote node's commitment transaction. This
   380  	// will be combined along with a per commitment secret to derive a
   381  	// unique revocation key for each state.
   382  	RevocationBasePoint keychain.KeyDescriptor
   383  
   384  	// PaymentBasePoint is the base public key to be used when deriving
   385  	// the key used within the non-delayed pay-to-self output on the
   386  	// commitment transaction for a node. This will be combined with a
   387  	// tweak derived from the per-commitment point to ensure unique keys
   388  	// for each commitment transaction.
   389  	PaymentBasePoint keychain.KeyDescriptor
   390  
   391  	// DelayBasePoint is the base public key to be used when deriving the
   392  	// key used within the delayed pay-to-self output on the commitment
   393  	// transaction for a node. This will be combined with a tweak derived
   394  	// from the per-commitment point to ensure unique keys for each
   395  	// commitment transaction.
   396  	DelayBasePoint keychain.KeyDescriptor
   397  
   398  	// HtlcBasePoint is the base public key to be used when deriving the
   399  	// local HTLC key. The derived key (combined with the tweak derived
   400  	// from the per-commitment point) is used within the "to self" clause
   401  	// within any HTLC output scripts.
   402  	HtlcBasePoint keychain.KeyDescriptor
   403  }
   404  
   405  // ChannelCommitment is a snapshot of the commitment state at a particular
   406  // point in the commitment chain. With each state transition, a snapshot of the
   407  // current state along with all non-settled HTLCs is recorded. These snapshots
   408  // detail the state of the _remote_ party's commitment at a particular state
   409  // number.  For ourselves (the local node) we ONLY store our most recent
   410  // (unrevoked) state for safety purposes.
   411  type ChannelCommitment struct {
   412  	// CommitHeight is the update number that this ChannelCommitment
   413  	// represents, i.e. the total number of commitment updates to this point. This can be
   414  	// viewed as sort of a "commitment height" as this number is
   415  	// monotonically increasing.
   416  	CommitHeight uint64
   417  
   418  	// LocalLogIndex is the cumulative log index of the local node at
   419  	// this point in the commitment chain. This value will be incremented
   420  	// for each _update_ added to the local update log.
   421  	LocalLogIndex uint64
   422  
   423  	// LocalHtlcIndex is the current local running HTLC index. This value
   424  	// will be incremented for each outgoing HTLC the local node offers.
   425  	LocalHtlcIndex uint64
   426  
   427  	// RemoteLogIndex is the cumulative log index of the remote node
   428  	// at this point in the commitment chain. This value will be
   429  	// incremented for each _update_ added to the remote update log.
   430  	RemoteLogIndex uint64
   431  
   432  	// RemoteHtlcIndex is the current remote running HTLC index. This value
   433  	// will be incremented for each outgoing HTLC the remote node offers.
   434  	RemoteHtlcIndex uint64
   435  
   436  	// LocalBalance is the current available settled balance within the
   437  	// channel directly spendable by us.
   438  	//
   439  	// NOTE: This is the balance *after* subtracting any commitment fee,
   440  	// AND anchor output values.
   441  	LocalBalance lnwire.MilliAtom
   442  
   443  	// RemoteBalance is the current available settled balance within the
   444  	// channel directly spendable by the remote node.
   445  	//
   446  	// NOTE: This is the balance *after* subtracting any commitment fee,
   447  	// AND anchor output values.
   448  	RemoteBalance lnwire.MilliAtom
   449  
   450  	// CommitFee is the amount calculated to be paid in fees for the
   451  	// current set of commitment transactions. The fee amount is persisted
   452  	// with the channel in order to allow the fee amount to be removed and
   453  	// recalculated with each channel state update, including updates that
   454  	// happen after a system restart.
   455  	CommitFee dcrutil.Amount
   456  
   457  	// FeePerKB is the min atoms/kilobyte that should be paid within the
   458  	// commitment transaction for the entire duration of the channel's
   459  	// lifetime. This field may be updated during normal operation of the
   460  	// channel as on-chain conditions change.
   461  	//
   462  	// TODO(halseth): make this AtomsPerKByte. Cannot be done atm because
   463  	// this will cause the import cycle lnwallet<->channeldb. Fee
   464  	// estimation stuff should be in its own package.
   465  	FeePerKB dcrutil.Amount
   466  
   467  	// CommitTx is the latest version of the commitment state, which is
   468  	// broadcastable by us.
   469  	CommitTx *wire.MsgTx
   470  
   471  	// CommitSig is one half of the signature required to fully complete
   472  	// the script for the commitment transaction above. This is the
   473  	// signature signed by the remote party for our version of the
   474  	// commitment transactions.
   475  	CommitSig []byte
   476  
   477  	// Htlcs is the set of HTLC's that are pending at this particular
   478  	// commitment height.
   479  	Htlcs []HTLC
   480  
   481  	// TODO(roasbeef): pending commit pointer?
   482  	//  * lets just walk through
   483  }
   484  
   485  // ChannelStatus is a bit vector used to indicate whether an OpenChannel is in
   486  // the default usable state, or a state where it shouldn't be used.
   487  type ChannelStatus uint8
   488  
   489  var (
   490  	// ChanStatusDefault is the normal state of an open channel.
   491  	ChanStatusDefault ChannelStatus
   492  
   493  	// ChanStatusBorked indicates that the channel has entered an
   494  	// irreconcilable state, triggered by a state desynchronization or
   495  	// channel breach.  Channels in this state should never be added to the
   496  	// htlc switch.
   497  	ChanStatusBorked ChannelStatus = 1
   498  
   499  	// ChanStatusCommitBroadcasted indicates that a commitment for this
   500  	// channel has been broadcasted.
   501  	ChanStatusCommitBroadcasted ChannelStatus = 1 << 1
   502  
   503  	// ChanStatusLocalDataLoss indicates that we have lost channel state
   504  	// for this channel, and broadcasting our latest commitment might be
   505  	// considered a breach.
   506  	//
   507  	// TODO(halseth): actually enforce that we are not force closing such a
   508  	// channel.
   509  	ChanStatusLocalDataLoss ChannelStatus = 1 << 2
   510  
   511  	// ChanStatusRestored is a status flag that signals that the channel
   512  	// has been restored, and doesn't have all the fields a typical channel
   513  	// will have.
   514  	ChanStatusRestored ChannelStatus = 1 << 3
   515  
   516  	// ChanStatusCoopBroadcasted indicates that a cooperative close for
   517  	// this channel has been broadcasted. Older cooperatively closed
   518  	// channels will only have this status set. Newer ones will also have
   519  	// close initiator information stored using the local/remote initiator
   520  	// status. This status is set in conjunction with the initiator status
   521  	// so that we do not need to check multiple channel statuses for
   522  	// cooperative closes.
   523  	ChanStatusCoopBroadcasted ChannelStatus = 1 << 4
   524  
   525  	// ChanStatusLocalCloseInitiator indicates that we initiated closing
   526  	// the channel.
   527  	ChanStatusLocalCloseInitiator ChannelStatus = 1 << 5
   528  
   529  	// ChanStatusRemoteCloseInitiator indicates that the remote node
   530  	// initiated closing the channel.
   531  	ChanStatusRemoteCloseInitiator ChannelStatus = 1 << 6
   532  )
   533  
   534  // chanStatusStrings maps a ChannelStatus to a human friendly string that
   535  // describes that status.
   536  var chanStatusStrings = map[ChannelStatus]string{
   537  	ChanStatusDefault:              "ChanStatusDefault",
   538  	ChanStatusBorked:               "ChanStatusBorked",
   539  	ChanStatusCommitBroadcasted:    "ChanStatusCommitBroadcasted",
   540  	ChanStatusLocalDataLoss:        "ChanStatusLocalDataLoss",
   541  	ChanStatusRestored:             "ChanStatusRestored",
   542  	ChanStatusCoopBroadcasted:      "ChanStatusCoopBroadcasted",
   543  	ChanStatusLocalCloseInitiator:  "ChanStatusLocalCloseInitiator",
   544  	ChanStatusRemoteCloseInitiator: "ChanStatusRemoteCloseInitiator",
   545  }
   546  
   547  // orderedChanStatusFlags is an in-order list of all the channel status flags.
   548  var orderedChanStatusFlags = []ChannelStatus{
   549  	ChanStatusBorked,
   550  	ChanStatusCommitBroadcasted,
   551  	ChanStatusLocalDataLoss,
   552  	ChanStatusRestored,
   553  	ChanStatusCoopBroadcasted,
   554  	ChanStatusLocalCloseInitiator,
   555  	ChanStatusRemoteCloseInitiator,
   556  }
   557  
   558  // String returns a human-readable representation of the ChannelStatus.
   559  func (c ChannelStatus) String() string {
   560  	// If no flags are set, then this is the default case.
   561  	if c == ChanStatusDefault {
   562  		return chanStatusStrings[ChanStatusDefault]
   563  	}
   564  
   565  	// Add individual bit flags.
   566  	statusStr := ""
   567  	for _, flag := range orderedChanStatusFlags {
   568  		if c&flag == flag {
   569  			statusStr += chanStatusStrings[flag] + "|"
   570  			c -= flag
   571  		}
   572  	}
   573  
   574  	// Trim the trailing bar from the string.
   575  	statusStr = strings.TrimRight(statusStr, "|")
   576  
   577  	// Add any remaining flags which aren't accounted for as hex.
   578  	if c != 0 {
   579  		statusStr += "|0x" + strconv.FormatUint(uint64(c), 16)
   580  	}
   581  
   582  	// If this was purely an unknown flag, then remove the extra bar at the
   583  	// start of the string.
   584  	statusStr = strings.TrimLeft(statusStr, "|")
   585  
   586  	return statusStr
   587  }
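
        // For example, a status with both the borked and commit broadcasted
        // bits set is rendered by String as
        // "ChanStatusBorked|ChanStatusCommitBroadcasted", while a status
        // consisting solely of an unknown bit such as 1<<7 is rendered as
        // "0x80".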
   588  
   589  // OpenChannel encapsulates the persistent and dynamic state of an open channel
   590  // with a remote node. An open channel supports several options for on-disk
   591  // serialization depending on the exact context. Full (upon channel creation)
   592  // state commitments, and partial (due to a commitment update) writes are
   593  // supported. Each partial write due to a state update appends the new update
   594  // to an on-disk log, which can then subsequently be queried in order to
   595  // "time-travel" to a prior state.
   596  type OpenChannel struct {
   597  	// ChanType denotes which type of channel this is.
   598  	ChanType ChannelType
   599  
   600  	// ChainHash is a hash which represents the blockchain that this
   601  	// channel will be opened within. This value is typically the genesis
   602  	// hash. In the case that the original chain went through a contentious
   603  	// hard-fork, then this value will be tweaked using the unique fork
   604  	// point on each branch.
   605  	ChainHash chainhash.Hash
   606  
   607  	// FundingOutpoint is the outpoint of the final funding transaction.
   608  	// This value uniquely and globally identifies the channel within the
   609  	// target blockchain as specified by the chain hash parameter.
   610  	FundingOutpoint wire.OutPoint
   611  
   612  	// ShortChannelID encodes the exact location in the chain in which the
   613  	// channel was initially confirmed. This includes: the block height,
   614  	// transaction index, and the output within the target transaction.
   615  	ShortChannelID lnwire.ShortChannelID
   616  
   617  	// IsPending indicates whether a channel's funding transaction has yet
   618  	// to be confirmed.
   619  	IsPending bool
   620  
   621  	// IsInitiator is a bool which indicates if we were the original
   622  	// initiator for the channel. This value may affect how higher levels
   623  	// negotiate fees, or close the channel.
   624  	IsInitiator bool
   625  
   626  	// chanStatus is the current status of this channel. If it is not in
   627  	// the state Default, it should not be used for forwarding payments.
   628  	chanStatus ChannelStatus
   629  
   630  	// FundingBroadcastHeight is the height at which the funding
   631  	// transaction was broadcast. This value can be used by higher level
   632  	// sub-systems to determine if a channel is stale and/or should have
   633  	// been confirmed before a certain height.
   634  	FundingBroadcastHeight uint32
   635  
   636  	// NumConfsRequired is the number of confirmations a channel's funding
   637  	// transaction must have received in order to be considered available
   638  	// for normal transactional use.
   639  	NumConfsRequired uint16
   640  
   641  	// ChannelFlags holds the flags that were sent as part of the
   642  	// open_channel message.
   643  	ChannelFlags lnwire.FundingFlag
   644  
   645  	// IdentityPub is the identity public key of the remote node this
   646  	// channel has been established with.
   647  	IdentityPub *secp256k1.PublicKey
   648  
   649  	// Capacity is the total capacity of this channel.
   650  	Capacity dcrutil.Amount
   651  
   652  	// TotalMAtomsSent is the total number of milli-atoms we've sent
   653  	// within this channel.
   654  	TotalMAtomsSent lnwire.MilliAtom
   655  
   656  	// TotalMAtomsReceived is the total number of milli-atoms we've
   657  	// received within this channel.
   658  	TotalMAtomsReceived lnwire.MilliAtom
   659  
   660  	// LocalChanCfg is the channel configuration for the local node.
   661  	LocalChanCfg ChannelConfig
   662  
   663  	// RemoteChanCfg is the channel configuration for the remote node.
   664  	RemoteChanCfg ChannelConfig
   665  
   666  	// LocalCommitment is the current local commitment state for the local
   667  	// party. This is stored distinct from the state of the remote party
   668  	// as there are certain asymmetric parameters which affect the
   669  	// structure of each commitment.
   670  	LocalCommitment ChannelCommitment
   671  
   672  	// RemoteCommitment is the current remote commitment state for the
   673  	// remote party. This is stored distinct from the state of the local
   674  	// party as there are certain asymmetric parameters which affect the
   675  	// structure of each commitment.
   676  	RemoteCommitment ChannelCommitment
   677  
   678  	// RemoteCurrentRevocation is the current revocation for their
   679  	// commitment transaction. However, since this is the derived public key,
   680  	// we don't yet have the private key so we aren't yet able to verify
   681  	// that it's actually in the hash chain.
   682  	RemoteCurrentRevocation *secp256k1.PublicKey
   683  
   684  	// RemoteNextRevocation is the revocation key to be used for the *next*
   685  	// commitment transaction we create for the local node. Within the
   686  	// specification, this value is referred to as the
   687  	// per-commitment-point.
   688  	RemoteNextRevocation *secp256k1.PublicKey
   689  
   690  	// RevocationProducer is used to generate the revocation in such a way
   691  	// that the remote side might store it efficiently and have the ability
   692  	// to restore the revocation by index if needed. The current
   693  	// implementation of the secret producer is the shachain producer.
   694  	RevocationProducer shachain.Producer
   695  
   696  	// RevocationStore is used to efficiently store the revocations for
   697  	// previous channel states sent to us by the remote side. The current
   698  	// implementation of the secret store is the shachain store.
   699  	RevocationStore shachain.Store
   700  
   701  	// Packager is used to create and update forwarding packages for this
   702  	// channel, which encodes all necessary information to recover from
   703  	// failures and reforward HTLCs that were not fully processed.
   704  	Packager FwdPackager
   705  
   706  	// FundingTxn is the transaction containing this channel's funding
   707  	// outpoint. Upon restarts, this txn will be rebroadcast if the channel
   708  	// is found to be pending.
   709  	//
   710  	// NOTE: This value will only be populated for single-funder channels
   711  	// for which we are the initiator, and that we also have the funding
   712  	// transaction for. One can check this by using the HasFundingTx()
   713  	// method on the ChanType field.
   714  	FundingTxn *wire.MsgTx
   715  
   716  	// LocalShutdownScript is set to a pre-set script if the channel was opened
   717  	// by the local node with option_upfront_shutdown_script set. If the option
   718  	// was not set, the field is empty.
   719  	LocalShutdownScript lnwire.DeliveryAddress
   720  
   721  	// RemoteShutdownScript is set to a pre-set script if the channel was opened
   722  	// by the remote node with option_upfront_shutdown_script set. If the option
   723  	// was not set, the field is empty.
   724  	RemoteShutdownScript lnwire.DeliveryAddress
   725  
   726  	// ThawHeight is the height when a frozen channel once again becomes a
   727  	// normal channel. If this is zero, then there are no restrictions on
   728  	// this channel. If the value is lower than 500,000, then it's
   729  	// interpreted as a relative height, or an absolute height otherwise.
   730  	ThawHeight uint32
   731  
   732  	// LastWasRevoke is a boolean that determines if the last update we sent
   733  	// was a revocation (true) or a commitment signature (false).
   734  	LastWasRevoke bool
   735  
   736  	// RevocationKeyLocator stores the KeyLocator information that we will
   737  	// need to derive the shachain root for this channel. This allows us to
   738  	// have private key isolation from lnd.
   739  	RevocationKeyLocator keychain.KeyLocator
   740  
   741  	// TODO(roasbeef): eww
   742  	Db *ChannelStateDB
   743  
   744  	// TODO(roasbeef): just need to store local and remote HTLC's?
   745  
   746  	sync.RWMutex
   747  }
   748  
   749  // ShortChanID returns the current ShortChannelID of this channel.
   750  func (c *OpenChannel) ShortChanID() lnwire.ShortChannelID {
   751  	c.RLock()
   752  	defer c.RUnlock()
   753  
   754  	return c.ShortChannelID
   755  }
   756  
   757  // ChanStatus returns the current ChannelStatus of this channel.
   758  func (c *OpenChannel) ChanStatus() ChannelStatus {
   759  	c.RLock()
   760  	defer c.RUnlock()
   761  
   762  	return c.chanStatus
   763  }
   764  
   765  // ApplyChanStatus allows the caller to modify the internal channel state in a
   766  // thread-safe manner.
   767  func (c *OpenChannel) ApplyChanStatus(status ChannelStatus) error {
   768  	c.Lock()
   769  	defer c.Unlock()
   770  
   771  	return c.putChanStatus(status)
   772  }
   773  
   774  // ClearChanStatus allows the caller to clear a particular channel status from
   775  // the primary channel status bit field. After this method returns, a call to
   776  // HasChanStatus(status) should return false.
   777  func (c *OpenChannel) ClearChanStatus(status ChannelStatus) error {
   778  	c.Lock()
   779  	defer c.Unlock()
   780  
   781  	return c.clearChanStatus(status)
   782  }
   783  
   784  // HasChanStatus returns true if the internal bitfield channel status of the
   785  // target channel has the specified status bit set.
   786  func (c *OpenChannel) HasChanStatus(status ChannelStatus) bool {
   787  	c.RLock()
   788  	defer c.RUnlock()
   789  
   790  	return c.hasChanStatus(status)
   791  }
   792  
   793  func (c *OpenChannel) hasChanStatus(status ChannelStatus) bool {
   794  	// Special case ChanStatusDefault since it isn't actually a flag, but a
   795  	// particular combination (or lack thereof) of flags.
   796  	if status == ChanStatusDefault {
   797  		return c.chanStatus == ChanStatusDefault
   798  	}
   799  
   800  	return c.chanStatus&status == status
   801  }
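
        // To illustrate the special case above: a channel whose status is
        // ChanStatusBorked|ChanStatusLocalCloseInitiator satisfies
        // hasChanStatus(ChanStatusBorked), but not
        // hasChanStatus(ChanStatusDefault), since the default status is the
        // absence of all flags rather than a flag itself.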
   802  
   803  // RefreshShortChanID updates the in-memory channel state using the latest
   804  // value observed on disk.
   805  //
   806  // TODO: the name of this function should be changed to reflect the fact that
   807  // it is not only refreshing the short channel id but all the channel state.
   808  // maybe Refresh/Reload?
   809  func (c *OpenChannel) RefreshShortChanID() error {
   810  	c.Lock()
   811  	defer c.Unlock()
   812  
   813  	err := kvdb.View(c.Db.backend, func(tx kvdb.RTx) error {
   814  		chanBucket, err := fetchChanBucket(
   815  			tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
   816  		)
   817  		if err != nil {
   818  			return err
   819  		}
   820  
   821  		// We'll re-populate the in-memory channel with the info
   822  		// fetched from disk.
   823  		if err := fetchChanInfo(chanBucket, c); err != nil {
   824  			return fmt.Errorf("unable to fetch chan info: %v", err)
   825  		}
   826  
   827  		return nil
   828  	}, func() {})
   829  	if err != nil {
   830  		return err
   831  	}
   832  
   833  	return nil
   834  }
   835  
   836  // fetchChanBucket is a helper function that returns the bucket where a
   837  // channel's data resides, given: the public key for the node, the outpoint,
   838  // and the chainhash that the channel resides on.
   839  func fetchChanBucket(tx kvdb.RTx, nodeKey *secp256k1.PublicKey,
   840  	outPoint *wire.OutPoint, chainHash chainhash.Hash) (kvdb.RBucket, error) {
   841  
   842  	// First fetch the top level bucket which stores all data related to
   843  	// current, active channels.
   844  	openChanBucket := tx.ReadBucket(openChannelBucket)
   845  	if openChanBucket == nil {
   846  		return nil, ErrNoChanDBExists
   847  	}
   848  
   849  	// TODO(roasbeef): CreateTopLevelBucket on the interface isn't like
   850  	// CreateIfNotExists, will return error
   851  
   852  	// Within this top level bucket, fetch the bucket dedicated to storing
   853  	// open channel data specific to the remote node.
   854  	nodePub := nodeKey.SerializeCompressed()
   855  	nodeChanBucket := openChanBucket.NestedReadBucket(nodePub)
   856  	if nodeChanBucket == nil {
   857  		return nil, ErrNoActiveChannels
   858  	}
   859  
   860  	// We'll then recurse down an additional layer in order to fetch the
   861  	// bucket for this particular chain.
   862  	chainBucket := nodeChanBucket.NestedReadBucket(chainHash[:])
   863  	if chainBucket == nil {
   864  		return nil, ErrNoActiveChannels
   865  	}
   866  
   867  	// With the bucket for the node and chain fetched, we can now go down
   868  	// another level, for this channel itself.
   869  	var chanPointBuf bytes.Buffer
   870  	if err := writeOutpoint(&chanPointBuf, outPoint); err != nil {
   871  		return nil, err
   872  	}
   873  	chanBucket := chainBucket.NestedReadBucket(chanPointBuf.Bytes())
   874  	if chanBucket == nil {
   875  		return nil, ErrChannelNotFound
   876  	}
   877  
   878  	return chanBucket, nil
   879  }
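
        // A typical read path wraps fetchChanBucket in a view transaction, as
        // RefreshShortChanID does above. A minimal sketch, where nodeKey,
        // fundingOutpoint, chainHash and channel are assumed placeholders:
        //
        //	err := kvdb.View(db.backend, func(tx kvdb.RTx) error {
        //		chanBucket, err := fetchChanBucket(
        //			tx, nodeKey, &fundingOutpoint, chainHash,
        //		)
        //		if err != nil {
        //			return err
        //		}
        //		return fetchChanInfo(chanBucket, channel)
        //	}, func() {})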
   880  
   881  // fetchChanBucketRw is a helper function that returns the bucket where a
   882  // channel's data resides, given: the public key for the node, the outpoint,
   883  // and the chainhash that the channel resides on. This differs from
   884  // fetchChanBucket in that it returns a writeable bucket.
   885  func fetchChanBucketRw(tx kvdb.RwTx, nodeKey *secp256k1.PublicKey,
   886  	outPoint *wire.OutPoint, chainHash chainhash.Hash) (kvdb.RwBucket, error) {
   887  
   888  	// First fetch the top level bucket which stores all data related to
   889  	// current, active channels.
   890  	openChanBucket := tx.ReadWriteBucket(openChannelBucket)
   891  	if openChanBucket == nil {
   892  		return nil, ErrNoChanDBExists
   893  	}
   894  
   895  	// TODO(roasbeef): CreateTopLevelBucket on the interface isn't like
   896  	// CreateIfNotExists, will return error
   897  
   898  	// Within this top level bucket, fetch the bucket dedicated to storing
   899  	// open channel data specific to the remote node.
   900  	nodePub := nodeKey.SerializeCompressed()
   901  	nodeChanBucket := openChanBucket.NestedReadWriteBucket(nodePub)
   902  	if nodeChanBucket == nil {
   903  		return nil, ErrNoActiveChannels
   904  	}
   905  
   906  	// We'll then recurse down an additional layer in order to fetch the
   907  	// bucket for this particular chain.
   908  	chainBucket := nodeChanBucket.NestedReadWriteBucket(chainHash[:])
   909  	if chainBucket == nil {
   910  		return nil, ErrNoActiveChannels
   911  	}
   912  
   913  	// With the bucket for the node and chain fetched, we can now go down
   914  	// another level, for this channel itself.
   915  	var chanPointBuf bytes.Buffer
   916  	if err := writeOutpoint(&chanPointBuf, outPoint); err != nil {
   917  		return nil, err
   918  	}
   919  	chanBucket := chainBucket.NestedReadWriteBucket(chanPointBuf.Bytes())
   920  	if chanBucket == nil {
   921  		return nil, ErrChannelNotFound
   922  	}
   923  
   924  	return chanBucket, nil
   925  }
   926  
   927  // fullSync syncs the contents of an OpenChannel while re-using an existing
   928  // database transaction.
   929  func (c *OpenChannel) fullSync(tx kvdb.RwTx) error {
   930  	// Fetch the outpoint bucket and check if the outpoint already exists.
   931  	opBucket := tx.ReadWriteBucket(outpointBucket)
   932  
   933  	var chanPointBuf bytes.Buffer
   934  	if err := writeOutpoint(&chanPointBuf, &c.FundingOutpoint); err != nil {
   935  		return err
   936  	}
   937  
   938  	// Now, check if the outpoint exists in our index.
   939  	if opBucket.Get(chanPointBuf.Bytes()) != nil {
   940  		return ErrChanAlreadyExists
   941  	}
   942  
   943  	status := uint8(outpointOpen)
   944  
   945  	// Write the status of this outpoint as the first entry in a tlv
   946  	// stream.
   947  	statusRecord := tlv.MakePrimitiveRecord(indexStatusType, &status)
   948  	opStream, err := tlv.NewStream(statusRecord)
   949  	if err != nil {
   950  		return err
   951  	}
   952  
   953  	var b bytes.Buffer
   954  	if err := opStream.Encode(&b); err != nil {
   955  		return err
   956  	}
   957  
   958  	// Add the outpoint to our outpoint index with the tlv stream.
   959  	if err := opBucket.Put(chanPointBuf.Bytes(), b.Bytes()); err != nil {
   960  		return err
   961  	}
   962  
   963  	// First fetch the top level bucket which stores all data related to
   964  	// current, active channels.
   965  	openChanBucket, err := tx.CreateTopLevelBucket(openChannelBucket)
   966  	if err != nil {
   967  		return err
   968  	}
   969  
   970  	// Within this top level bucket, fetch the bucket dedicated to storing
   971  	// open channel data specific to the remote node.
   972  	nodePub := c.IdentityPub.SerializeCompressed()
   973  	nodeChanBucket, err := openChanBucket.CreateBucketIfNotExists(nodePub)
   974  	if err != nil {
   975  		return err
   976  	}
   977  
   978  	// We'll then recurse down an additional layer in order to fetch the
   979  	// bucket for this particular chain.
   980  	chainBucket, err := nodeChanBucket.CreateBucketIfNotExists(c.ChainHash[:])
   981  	if err != nil {
   982  		return err
   983  	}
   984  
   985  	// With the bucket for the node fetched, we can now go down another
   986  	// level, creating the bucket for this channel itself.
   987  	chanBucket, err := chainBucket.CreateBucket(
   988  		chanPointBuf.Bytes(),
   989  	)
   990  	switch {
   991  	case err == kvdb.ErrBucketExists:
   992  		// If this channel already exists, then in order to avoid
   993  		// overriding it, we'll return an error back up to the caller.
   994  		return ErrChanAlreadyExists
   995  	case err != nil:
   996  		return err
   997  	}
   998  
   999  	return putOpenChannel(chanBucket, c)
  1000  }
  1001  
  1002  // MarkAsOpen marks a channel as fully open given a locator that uniquely
  1003  // describes its location within the chain.
  1004  func (c *OpenChannel) MarkAsOpen(openLoc lnwire.ShortChannelID) error {
  1005  	c.Lock()
  1006  	defer c.Unlock()
  1007  
  1008  	if err := kvdb.Update(c.Db.backend, func(tx kvdb.RwTx) error {
  1009  		chanBucket, err := fetchChanBucketRw(
  1010  			tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
  1011  		)
  1012  		if err != nil {
  1013  			return err
  1014  		}
  1015  
  1016  		channel, err := fetchOpenChannel(chanBucket, &c.FundingOutpoint)
  1017  		if err != nil {
  1018  			return err
  1019  		}
  1020  
  1021  		channel.IsPending = false
  1022  		channel.ShortChannelID = openLoc
  1023  
  1024  		return putOpenChannel(chanBucket, channel)
  1025  	}, func() {}); err != nil {
  1026  		return err
  1027  	}
  1028  
  1029  	c.IsPending = false
  1030  	c.ShortChannelID = openLoc
  1031  	c.Packager = NewChannelPackager(openLoc)
  1032  
  1033  	return nil
  1034  }
  1035  
  1036  // MarkDataLoss sets the channel status to LocalDataLoss and stores the
  1037  // passed commitPoint for use in retrieving funds in case the remote force closes
  1038  // the channel.
  1039  func (c *OpenChannel) MarkDataLoss(commitPoint *secp256k1.PublicKey) error {
  1040  	c.Lock()
  1041  	defer c.Unlock()
  1042  
  1043  	var b bytes.Buffer
  1044  	if err := WriteElement(&b, commitPoint); err != nil {
  1045  		return err
  1046  	}
  1047  
  1048  	putCommitPoint := func(chanBucket kvdb.RwBucket) error {
  1049  		return chanBucket.Put(dataLossCommitPointKey, b.Bytes())
  1050  	}
  1051  
  1052  	return c.putChanStatus(ChanStatusLocalDataLoss, putCommitPoint)
  1053  }
  1054  
  1055  // DataLossCommitPoint retrieves the stored commit point set during
  1056  // MarkDataLoss. If not found ErrNoCommitPoint is returned.
  1057  func (c *OpenChannel) DataLossCommitPoint() (*secp256k1.PublicKey, error) {
  1058  	var commitPoint *secp256k1.PublicKey
  1059  
  1060  	err := kvdb.View(c.Db.backend, func(tx kvdb.RTx) error {
  1061  		chanBucket, err := fetchChanBucket(
  1062  			tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
  1063  		)
  1064  		switch err {
  1065  		case nil:
  1066  		case ErrNoChanDBExists, ErrNoActiveChannels, ErrChannelNotFound:
  1067  			return ErrNoCommitPoint
  1068  		default:
  1069  			return err
  1070  		}
  1071  
  1072  		bs := chanBucket.Get(dataLossCommitPointKey)
  1073  		if bs == nil {
  1074  			return ErrNoCommitPoint
  1075  		}
  1076  		r := bytes.NewReader(bs)
  1077  		if err := ReadElements(r, &commitPoint); err != nil {
  1078  			return err
  1079  		}
  1080  
  1081  		return nil
  1082  	}, func() {
  1083  		commitPoint = nil
  1084  	})
  1085  	if err != nil {
  1086  		return nil, err
  1087  	}
  1088  
  1089  	return commitPoint, nil
  1090  }
  1091  
  1092  // MarkBorked marks the channel as having reached an irreconcilable
  1093  // state, such as a channel breach or state desynchronization. Borked channels
  1094  // should never be added to the switch.
  1095  func (c *OpenChannel) MarkBorked() error {
  1096  	c.Lock()
  1097  	defer c.Unlock()
  1098  
  1099  	return c.putChanStatus(ChanStatusBorked)
  1100  }
  1101  
  1102  // ChanSyncMsg returns the ChannelReestablish message that should be sent upon
  1103  // reconnection with the remote peer that we're maintaining this channel with.
  1104  // The information contained within this message is necessary to re-sync our
  1105  // commitment chains in the case of a lost or only partially processed message.
  1106  // When the remote party receives this message, one of three things may happen:
  1107  //
  1108  //  1. We're fully synced and no messages need to be sent.
  1109  //  2. We didn't get the last CommitSig message they sent, so they'll re-send
  1110  //     it.
  1111  //  3. We didn't get the last RevokeAndAck message they sent, so they'll
  1112  //     re-send it.
  1113  //
  1114  // If this is a restored channel, having status ChanStatusRestored, then we'll
  1115  // modify our typical chan sync message to ensure they force close even if
  1116  // we're on the very first state.
  1117  func (c *OpenChannel) ChanSyncMsg() (*lnwire.ChannelReestablish, error) {
  1118  	c.Lock()
  1119  	defer c.Unlock()
  1120  
  1121  	// The remote commitment height that we'll send in the
  1122  	// ChannelReestablish message is our current commitment height plus
  1123  	// one. If the receiver thinks that our commitment height is actually
  1124  	// *equal* to this value, then they'll re-send the last commitment that
  1125  	// they sent but we never fully processed.
  1126  	localHeight := c.LocalCommitment.CommitHeight
  1127  	nextLocalCommitHeight := localHeight + 1
  1128  
  1129  	// The second value we'll send is the height of the remote commitment
  1130  	// from our PoV. If the receiver thinks that their height is actually
  1131  	// *one plus* this value, then they'll re-send their last revocation.
  1132  	remoteChainTipHeight := c.RemoteCommitment.CommitHeight
  1133  
  1134  	// If this channel has undergone a commitment update, then in order to
  1135  	// prove to the remote party our knowledge of their prior commitment
  1136  	// state, we'll also send over the last commitment secret that the
  1137  	// remote party sent.
  1138  	var lastCommitSecret [32]byte
  1139  	if remoteChainTipHeight != 0 {
  1140  		remoteSecret, err := c.RevocationStore.LookUp(
  1141  			remoteChainTipHeight - 1,
  1142  		)
  1143  		if err != nil {
  1144  			return nil, err
  1145  		}
  1146  		lastCommitSecret = [32]byte(*remoteSecret)
  1147  	}
  1148  
  1149  	// Additionally, we'll send over the current unrevoked commitment on
  1150  	// our local commitment transaction.
  1151  	currentCommitSecret, err := c.RevocationProducer.AtIndex(
  1152  		localHeight,
  1153  	)
  1154  	if err != nil {
  1155  		return nil, err
  1156  	}
  1157  
  1158  	// If we've restored this channel, then we'll purposefully give them an
  1159  	// invalid LocalUnrevokedCommitPoint so they'll force close the channel
  1160  	// allowing us to sweep our funds.
  1161  	if c.hasChanStatus(ChanStatusRestored) {
  1162  		currentCommitSecret[0] ^= 1
  1163  
  1164  		// If this is a tweakless channel, then we'll purposefully send
  1165  		// a next local height that's invalid to trigger a force close
  1166  		// on their end. We do this as tweakless channels don't require
  1167  		// that the commitment point is valid, only that it's present.
  1168  		if c.ChanType.IsTweakless() {
  1169  			nextLocalCommitHeight = 0
  1170  		}
  1171  	}
  1172  
  1173  	return &lnwire.ChannelReestablish{
  1174  		ChanID: lnwire.NewChanIDFromOutPoint(
  1175  			&c.FundingOutpoint,
  1176  		),
  1177  		NextLocalCommitHeight:  nextLocalCommitHeight,
  1178  		RemoteCommitTailHeight: remoteChainTipHeight,
  1179  		LastRemoteCommitSecret: lastCommitSecret,
  1180  		LocalUnrevokedCommitPoint: input.ComputeCommitmentPoint(
  1181  			currentCommitSecret[:],
  1182  		),
  1183  	}, nil
  1184  }
  1185  
  1186  // isBorked returns true if the channel has been marked as borked in the
  1187  // database. This requires an existing database transaction to already be
  1188  // active.
  1189  //
  1190  // NOTE: The primary mutex should already be held before this method is called.
  1191  func (c *OpenChannel) isBorked(chanBucket kvdb.RBucket) (bool, error) {
  1192  	channel, err := fetchOpenChannel(chanBucket, &c.FundingOutpoint)
  1193  	if err != nil {
  1194  		return false, err
  1195  	}
  1196  
  1197  	return channel.chanStatus != ChanStatusDefault, nil
  1198  }
  1199  
  1200  // MarkCommitmentBroadcasted marks the channel to indicate that a commitment
  1201  // transaction has been broadcast, either ours or the remote's, and that we
  1202  // should watch the chain for it to confirm before acting. It takes as argument the
  1203  // closing tx _we believe_ will appear in the chain. This is only used to
  1204  // republish this tx at startup to ensure propagation, and we should still
  1205  // handle the case where a different tx actually hits the chain.
  1206  func (c *OpenChannel) MarkCommitmentBroadcasted(closeTx *wire.MsgTx,
  1207  	locallyInitiated bool) error {
  1208  
  1209  	return c.markBroadcasted(
  1210  		ChanStatusCommitBroadcasted, forceCloseTxKey, closeTx,
  1211  		locallyInitiated,
  1212  	)
  1213  }
  1214  
  1215  // MarkCoopBroadcasted marks the channel to indicate that a cooperative close
  1216  // transaction has been broadcast, either ours or the remote's, and that we
  1217  // should watch the chain for it to confirm before taking further action. It
  1218  // takes as argument a cooperative close tx that could appear on chain, and
  1219  // should be rebroadcast upon startup. This is only used to republish and
  1220  // ensure propagation, and we should still handle the case where a different tx
  1221  // actually hits the chain.
  1222  func (c *OpenChannel) MarkCoopBroadcasted(closeTx *wire.MsgTx,
  1223  	locallyInitiated bool) error {
  1224  
  1225  	return c.markBroadcasted(
  1226  		ChanStatusCoopBroadcasted, coopCloseTxKey, closeTx,
  1227  		locallyInitiated,
  1228  	)
  1229  }
  1230  
  1231  // markBroadcasted is a helper function which modifies the channel status of the
  1232  // receiving channel and inserts a close transaction under the requested key,
  1233  // which should specify either a coop or force close. It adds a status which
  1234  // indicates the party that initiated the channel close.
  1235  func (c *OpenChannel) markBroadcasted(status ChannelStatus, key []byte,
  1236  	closeTx *wire.MsgTx, locallyInitiated bool) error {
  1237  
  1238  	c.Lock()
  1239  	defer c.Unlock()
  1240  
  1241  	// If a closing tx is provided, we'll generate a closure to write the
  1242  	// transaction in the appropriate bucket under the given key.
  1243  	var putClosingTx func(kvdb.RwBucket) error
  1244  	if closeTx != nil {
  1245  		var b bytes.Buffer
  1246  		if err := WriteElement(&b, closeTx); err != nil {
  1247  			return err
  1248  		}
  1249  
  1250  		putClosingTx = func(chanBucket kvdb.RwBucket) error {
  1251  			return chanBucket.Put(key, b.Bytes())
  1252  		}
  1253  	}
  1254  
  1255  	// Add the initiator status to the status provided. These statuses are
  1256  	// set in addition to the broadcast status so that we do not need to
  1257  	// migrate the original logic which does not store initiator.
  1258  	if locallyInitiated {
  1259  		status |= ChanStatusLocalCloseInitiator
  1260  	} else {
  1261  		status |= ChanStatusRemoteCloseInitiator
  1262  	}
  1263  
  1264  	return c.putChanStatus(status, putClosingTx)
  1265  }
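
        // The two Mark*Broadcasted methods above pair with the Broadcasted*
        // getters below. A minimal, hedged usage sketch; `channel` is assumed
        // to be a previously fetched *OpenChannel and `closeTx` a fully signed
        // close transaction:
        //
        //	if err := channel.MarkCommitmentBroadcasted(closeTx, true); err != nil {
        //		return err
        //	}
        //	// After a restart, the same tx can be recovered for rebroadcast:
        //	tx, err := channel.BroadcastedCommitment()
        //	if err != nil {
        //		return err
        //	}
        //	_ = tx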
  1266  
  1267  // BroadcastedCommitment retrieves the stored unilateral closing tx set during
  1268  // MarkCommitmentBroadcasted. If not found, ErrNoCloseTx is returned.
  1269  func (c *OpenChannel) BroadcastedCommitment() (*wire.MsgTx, error) {
  1270  	return c.getClosingTx(forceCloseTxKey)
  1271  }
  1272  
  1273  // BroadcastedCooperative retrieves the stored cooperative closing tx set during
  1274  // MarkCoopBroadcasted. If not found, ErrNoCloseTx is returned.
  1275  func (c *OpenChannel) BroadcastedCooperative() (*wire.MsgTx, error) {
  1276  	return c.getClosingTx(coopCloseTxKey)
  1277  }
  1278  
  1279  // getClosingTx is a helper method which returns the stored closing transaction
  1280  // for key. The caller should use either the force or coop closing keys.
  1281  func (c *OpenChannel) getClosingTx(key []byte) (*wire.MsgTx, error) {
  1282  	var closeTx *wire.MsgTx
  1283  
  1284  	err := kvdb.View(c.Db.backend, func(tx kvdb.RTx) error {
  1285  		chanBucket, err := fetchChanBucket(
  1286  			tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
  1287  		)
  1288  		switch err {
  1289  		case nil:
  1290  		case ErrNoChanDBExists, ErrNoActiveChannels, ErrChannelNotFound:
  1291  			return ErrNoCloseTx
  1292  		default:
  1293  			return err
  1294  		}
  1295  
  1296  		bs := chanBucket.Get(key)
  1297  		if bs == nil {
  1298  			return ErrNoCloseTx
  1299  		}
  1300  		r := bytes.NewReader(bs)
  1301  		return ReadElement(r, &closeTx)
  1302  	}, func() {
  1303  		closeTx = nil
  1304  	})
  1305  	if err != nil {
  1306  		return nil, err
  1307  	}
  1308  
  1309  	return closeTx, nil
  1310  }
  1311  
  1312  // putChanStatus appends the given status to the channel. fs is an optional
  1313  // list of closures that are given the chanBucket in order to atomically add
  1314  // extra information together with the new status.
  1315  func (c *OpenChannel) putChanStatus(status ChannelStatus,
  1316  	fs ...func(kvdb.RwBucket) error) error {
  1317  
  1318  	if err := kvdb.Update(c.Db.backend, func(tx kvdb.RwTx) error {
  1319  		chanBucket, err := fetchChanBucketRw(
  1320  			tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
  1321  		)
  1322  		if err != nil {
  1323  			return err
  1324  		}
  1325  
  1326  		channel, err := fetchOpenChannel(chanBucket, &c.FundingOutpoint)
  1327  		if err != nil {
  1328  			return err
  1329  		}
  1330  
  1331  		// Add this status to the existing bitvector found in the DB.
  1332  		status = channel.chanStatus | status
  1333  		channel.chanStatus = status
  1334  
  1335  		if err := putOpenChannel(chanBucket, channel); err != nil {
  1336  			return err
  1337  		}
  1338  
  1339  		for _, f := range fs {
  1340  			// Skip execution of nil closures.
  1341  			if f == nil {
  1342  				continue
  1343  			}
  1344  
  1345  			if err := f(chanBucket); err != nil {
  1346  				return err
  1347  			}
  1348  		}
  1349  
  1350  		return nil
  1351  	}, func() {}); err != nil {
  1352  		return err
  1353  	}
  1354  
  1355  	// Update the in-memory representation to keep it in sync with the DB.
  1356  	c.chanStatus = status
  1357  
  1358  	return nil
  1359  }
  1360  
  1361  func (c *OpenChannel) clearChanStatus(status ChannelStatus) error {
  1362  	if err := kvdb.Update(c.Db.backend, func(tx kvdb.RwTx) error {
  1363  		chanBucket, err := fetchChanBucketRw(
  1364  			tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
  1365  		)
  1366  		if err != nil {
  1367  			return err
  1368  		}
  1369  
  1370  		channel, err := fetchOpenChannel(chanBucket, &c.FundingOutpoint)
  1371  		if err != nil {
  1372  			return err
  1373  		}
  1374  
  1375  		// Unset this bit in the bitvector on disk.
  1376  		status = channel.chanStatus & ^status
  1377  		channel.chanStatus = status
  1378  
  1379  		return putOpenChannel(chanBucket, channel)
  1380  	}, func() {}); err != nil {
  1381  		return err
  1382  	}
  1383  
  1384  	// Update the in-memory representation to keep it in sync with the DB.
  1385  	c.chanStatus = status
  1386  
  1387  	return nil
  1388  }
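
        // putChanStatus and clearChanStatus treat the channel status as a
        // bitvector: bits are set with OR and cleared with AND NOT. A small
        // illustrative sketch using statuses defined in this package:
        //
        //	status := ChanStatusDefault
        //	status |= ChanStatusCommitBroadcasted    // set a bit
        //	status |= ChanStatusLocalCloseInitiator  // set another bit
        //	status &^= ChanStatusLocalCloseInitiator // clear it again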
  1389  
  1390  // putOpenChannel serializes and stores the current state of the channel in its
  1391  // entirety.
  1392  func putOpenChannel(chanBucket kvdb.RwBucket, channel *OpenChannel) error {
  1393  	// First, we'll write out all the relatively static fields, that are
  1394  	// decided upon initial channel creation.
  1395  	if err := putChanInfo(chanBucket, channel); err != nil {
  1396  		return fmt.Errorf("unable to store chan info: %v", err)
  1397  	}
  1398  
  1399  	// With the static channel info written out, we'll now write out the
  1400  	// current commitment state for both parties.
  1401  	if err := putChanCommitments(chanBucket, channel); err != nil {
  1402  		return fmt.Errorf("unable to store chan commitments: %v", err)
  1403  	}
  1404  
  1405  	// Next, if this is a frozen channel, we'll add in the auxiliary
  1406  	// information we need to store.
  1407  	if channel.ChanType.IsFrozen() || channel.ChanType.HasLeaseExpiration() {
  1408  		err := storeThawHeight(
  1409  			chanBucket, channel.ThawHeight,
  1410  		)
  1411  		if err != nil {
  1412  			return fmt.Errorf("unable to store thaw height: %v", err)
  1413  		}
  1414  	}
  1415  
  1416  	// Finally, we'll write out the revocation state for both parties
  1417  	// within a distinct key space.
  1418  	if err := putChanRevocationState(chanBucket, channel); err != nil {
  1419  		return fmt.Errorf("unable to store chan revocations: %v", err)
  1420  	}
  1421  
  1422  	return nil
  1423  }
  1424  
  1425  // fetchOpenChannel retrieves, and deserializes (including decrypting any
  1426  // sensitive fields), the complete channel identified by the passed chanPoint.
  1427  func fetchOpenChannel(chanBucket kvdb.RBucket,
  1428  	chanPoint *wire.OutPoint) (*OpenChannel, error) {
  1429  
  1430  	channel := &OpenChannel{
  1431  		FundingOutpoint: *chanPoint,
  1432  	}
  1433  
  1434  	// First, we'll read all the static information that changes less
  1435  	// frequently from disk.
  1436  	if err := fetchChanInfo(chanBucket, channel); err != nil {
  1437  		return nil, fmt.Errorf("unable to fetch chan info: %v", err)
  1438  	}
  1439  
  1440  	// With the static information read, we'll now read the current
  1441  	// commitment state for both sides of the channel.
  1442  	if err := fetchChanCommitments(chanBucket, channel); err != nil {
  1443  		return nil, fmt.Errorf("unable to fetch chan commitments: %v", err)
  1444  	}
  1445  
  1446  	// Next, if this is a frozen channel, we'll read out the auxiliary
  1447  	// information that was stored for it.
  1448  	if channel.ChanType.IsFrozen() || channel.ChanType.HasLeaseExpiration() {
  1449  		thawHeight, err := fetchThawHeight(chanBucket)
  1450  		if err != nil {
  1451  			return nil, fmt.Errorf("unable to fetch thaw "+
  1452  				"height: %v", err)
  1453  		}
  1454  
  1455  		channel.ThawHeight = thawHeight
  1456  	}
  1457  
  1458  	// Finally, we'll retrieve the current revocation state so we can
  1459  	// properly resume operation of the channel after a restart.
  1460  	if err := fetchChanRevocationState(chanBucket, channel); err != nil {
  1461  		return nil, fmt.Errorf("unable to fetch chan revocations: %v", err)
  1462  	}
  1463  
  1464  	channel.Packager = NewChannelPackager(channel.ShortChannelID)
  1465  
  1466  	return channel, nil
  1467  }
  1468  
  1469  // SyncPending writes the contents of the channel to the database while it's in
  1470  // the pending (waiting for funding confirmation) state. The IsPending flag
  1471  // will be set to true. When the channel's funding transaction is confirmed,
  1472  // the channel should be marked as "open" and the IsPending flag set to false.
  1473  // Note that this function also creates a LinkNode relationship between this
  1474  // newly created channel and a new LinkNode instance. This allows listing all
  1475  // channels in the database globally, or according to the LinkNode they were
  1476  // created with.
  1477  //
  1478  // TODO(roasbeef): addr param should eventually be an lnwire.NetAddress type
  1479  // that includes service bits.
  1480  func (c *OpenChannel) SyncPending(addr net.Addr, pendingHeight uint32) error {
  1481  	c.Lock()
  1482  	defer c.Unlock()
  1483  
  1484  	c.FundingBroadcastHeight = pendingHeight
  1485  
  1486  	return kvdb.Update(c.Db.backend, func(tx kvdb.RwTx) error {
  1487  		return syncNewChannel(tx, c, []net.Addr{addr})
  1488  	}, func() {})
  1489  }
  1490  
  1491  // syncNewChannel will write the passed channel to disk, and also create a
  1492  // LinkNode (if needed) for the channel peer.
  1493  func syncNewChannel(tx kvdb.RwTx, c *OpenChannel, addrs []net.Addr) error {
  1494  	// First, sync all the persistent channel state to disk.
  1495  	if err := c.fullSync(tx); err != nil {
  1496  		return err
  1497  	}
  1498  
  1499  	nodeInfoBucket, err := tx.CreateTopLevelBucket(nodeInfoBucket)
  1500  	if err != nil {
  1501  		return err
  1502  	}
  1503  
  1504  	// If a LinkNode for this identity public key already exists,
  1505  	// then we can exit early.
  1506  	nodePub := c.IdentityPub.SerializeCompressed()
  1507  	if nodeInfoBucket.Get(nodePub) != nil {
  1508  		return nil
  1509  	}
  1510  
  1511  	// Next, we need to establish a (possibly) new LinkNode relationship
  1512  	// for this channel. The LinkNode metadata contains reachability,
  1513  	// up-time, and service bits related information.
  1514  	linkNode := NewLinkNode(
  1515  		&LinkNodeDB{backend: c.Db.backend},
  1516  		wire.MainNet, c.IdentityPub, addrs...,
  1517  	)
  1518  
  1519  	// TODO(roasbeef): do away with link node all together?
  1520  
  1521  	return putLinkNode(nodeInfoBucket, linkNode)
  1522  }
  1523  
  1524  // UpdateCommitment updates the local commitment state. It locks in the pending
  1525  // local updates that were received by us from the remote party. The commitment
  1526  // state completely describes the balance state at this point in the commitment
  1527  // chain. In addition to that, it persists all the remote log updates that we
  1528  // have acked, but not signed a remote commitment for yet. These need to be
  1529  // persisted to be able to produce a valid commit signature if a restart would
  1530  // occur. This method is to be called when we revoke our prior commitment
  1531  // state.
  1532  func (c *OpenChannel) UpdateCommitment(newCommitment *ChannelCommitment,
  1533  	unsignedAckedUpdates []LogUpdate) error {
  1534  
  1535  	c.Lock()
  1536  	defer c.Unlock()
  1537  
  1538  	// If this is a restored channel, then we want to avoid mutating the
  1539  // state at all, as it's impossible to do so in a protocol compliant
  1540  	// manner.
  1541  	if c.hasChanStatus(ChanStatusRestored) {
  1542  		return ErrNoRestoredChannelMutation
  1543  	}
  1544  
  1545  	err := kvdb.Update(c.Db.backend, func(tx kvdb.RwTx) error {
  1546  		chanBucket, err := fetchChanBucketRw(
  1547  			tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
  1548  		)
  1549  		if err != nil {
  1550  			return err
  1551  		}
  1552  
  1553  		// If the channel is marked as borked, then for safety reasons,
  1554  		// we shouldn't attempt any further updates.
  1555  		isBorked, err := c.isBorked(chanBucket)
  1556  		if err != nil {
  1557  			return err
  1558  		}
  1559  		if isBorked {
  1560  			return ErrChanBorked
  1561  		}
  1562  
  1563  		if err = putChanInfo(chanBucket, c); err != nil {
  1564  			return fmt.Errorf("unable to store chan info: %v", err)
  1565  		}
  1566  
  1567  		// With the proper bucket fetched, we'll now write the latest
  1568  		// commitment state to disk for the target party.
  1569  		err = putChanCommitment(
  1570  			chanBucket, newCommitment, true,
  1571  		)
  1572  		if err != nil {
  1573  			return fmt.Errorf("unable to store chan "+
  1574  				"commitment: %v", err)
  1575  		}
  1576  
  1577  		// Persist unsigned but acked remote updates that need to be
  1578  		// restored after a restart.
  1579  		var b bytes.Buffer
  1580  		err = serializeLogUpdates(&b, unsignedAckedUpdates)
  1581  		if err != nil {
  1582  			return err
  1583  		}
  1584  
  1585  		err = chanBucket.Put(unsignedAckedUpdatesKey, b.Bytes())
  1586  		if err != nil {
  1587  			return fmt.Errorf("unable to store dangling remote "+
  1588  				"updates: %v", err)
  1589  		}
  1590  
  1591  		// Since we have just sent the counterparty a revocation, store true
  1592  		// under lastWasRevokeKey.
  1593  		var b2 bytes.Buffer
  1594  		if err := WriteElements(&b2, true); err != nil {
  1595  			return err
  1596  		}
  1597  
  1598  		if err := chanBucket.Put(lastWasRevokeKey, b2.Bytes()); err != nil {
  1599  			return err
  1600  		}
  1601  
  1602  		// Persist the remote unsigned local updates that are not included
  1603  		// in our new commitment.
  1604  		updateBytes := chanBucket.Get(remoteUnsignedLocalUpdatesKey)
  1605  		if updateBytes == nil {
  1606  			return nil
  1607  		}
  1608  
  1609  		r := bytes.NewReader(updateBytes)
  1610  		updates, err := deserializeLogUpdates(r)
  1611  		if err != nil {
  1612  			return err
  1613  		}
  1614  
  1615  		var validUpdates []LogUpdate
  1616  		for _, upd := range updates {
  1617  			// Filter for updates that are not on our local
  1618  			// commitment.
  1619  			if upd.LogIndex >= newCommitment.LocalLogIndex {
  1620  				validUpdates = append(validUpdates, upd)
  1621  			}
  1622  		}
  1623  
  1624  		var b3 bytes.Buffer
  1625  		err = serializeLogUpdates(&b3, validUpdates)
  1626  		if err != nil {
  1627  			return fmt.Errorf("unable to serialize log updates: %v", err)
  1628  		}
  1629  
  1630  		err = chanBucket.Put(remoteUnsignedLocalUpdatesKey, b3.Bytes())
  1631  		if err != nil {
  1632  			return fmt.Errorf("unable to store remote unsigned local updates: %v", err)
  1633  		}
  1634  
  1635  		return nil
  1636  	}, func() {})
  1637  	if err != nil {
  1638  		return err
  1639  	}
  1640  
  1641  	c.LocalCommitment = *newCommitment
  1642  
  1643  	return nil
  1644  }
  1645  
  1646  // BalancesAtHeight returns the local and remote balances on our commitment
  1647  // transactions as of a given height.
  1648  //
  1649  // NOTE: these are our balances *after* subtracting the commitment fee and
  1650  // anchor outputs.
  1651  func (c *OpenChannel) BalancesAtHeight(height uint64) (lnwire.MilliAtom,
  1652  	lnwire.MilliAtom, error) {
  1653  
  1654  	if height > c.LocalCommitment.CommitHeight &&
  1655  		height > c.RemoteCommitment.CommitHeight {
  1656  
  1657  		return 0, 0, errHeightNotReached
  1658  	}
  1659  
  1660  	// If our current local commit is at the desired height, we can return our
  1661  	// current balances.
  1662  	if c.LocalCommitment.CommitHeight == height {
  1663  		return c.LocalCommitment.LocalBalance,
  1664  			c.LocalCommitment.RemoteBalance, nil
  1665  	}
  1666  
  1667  	// If our current remote commit is at the desired height, we can return
  1668  	// the current balances.
  1669  	if c.RemoteCommitment.CommitHeight == height {
  1670  		return c.RemoteCommitment.LocalBalance,
  1671  			c.RemoteCommitment.RemoteBalance, nil
  1672  	}
  1673  
  1674  	// If we are not currently on the height requested, we need to look up
  1675  	// the previous height to obtain our balances at the given height.
  1676  	commit, err := c.FindPreviousState(height)
  1677  	if err != nil {
  1678  		return 0, 0, err
  1679  	}
  1680  
  1681  	return commit.LocalBalance, commit.RemoteBalance, nil
  1682  }
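
        // A brief usage sketch for BalancesAtHeight; `channel` and `height`
        // are assumed inputs. errHeightNotReached signals that the requested
        // height is beyond both current commitment heights:
        //
        //	local, remote, err := channel.BalancesAtHeight(height)
        //	if err != nil {
        //		return err
        //	}
        //	fmt.Printf("height %d: local=%v remote=%v\n", height, local, remote)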
  1683  
  1684  // ActiveHtlcs returns a slice of HTLC's which are currently active on *both*
  1685  // commitment transactions.
  1686  func (c *OpenChannel) ActiveHtlcs() []HTLC {
  1687  	c.RLock()
  1688  	defer c.RUnlock()
  1689  
  1690  	// We'll only return HTLC's that are locked into *both* commitment
  1691  	// transactions. So we'll iterate through their set of HTLC's to note
  1692  	// which ones are present on their commitment.
  1693  	remoteHtlcs := make(map[[32]byte]struct{})
  1694  	for _, htlc := range c.RemoteCommitment.Htlcs {
  1695  		onionHash := sha256.Sum256(htlc.OnionBlob)
  1696  		remoteHtlcs[onionHash] = struct{}{}
  1697  	}
  1698  
  1699  	// Now that we know which HTLC's they have, we'll only mark the HTLC's
  1700  	// as active if *we* know them as well.
  1701  	activeHtlcs := make([]HTLC, 0, len(remoteHtlcs))
  1702  	for _, htlc := range c.LocalCommitment.Htlcs {
  1703  		onionHash := sha256.Sum256(htlc.OnionBlob)
  1704  		if _, ok := remoteHtlcs[onionHash]; !ok {
  1705  			continue
  1706  		}
  1707  
  1708  		activeHtlcs = append(activeHtlcs, htlc)
  1709  	}
  1710  
  1711  	return activeHtlcs
  1712  }
  1713  
  1714  // HTLC is the on-disk representation of a hash time-locked contract. HTLCs are
  1715  // contained within ChannelDeltas which encode the current state of the
  1716  // commitment between state updates.
  1717  //
  1718  // TODO(roasbeef): save space by using smaller ints at tail end?
  1719  type HTLC struct {
  1720  	// Signature is the signature for the second level covenant transaction
  1721  	// for this HTLC. The second level transaction is a timeout tx in the
  1722  	// case that this is an outgoing HTLC, and a success tx in the case
  1723  	// that this is an incoming HTLC.
  1724  	//
  1725  	// TODO(roasbeef): make [64]byte instead?
  1726  	Signature []byte
  1727  
  1728  	// RHash is the payment hash of the HTLC.
  1729  	RHash [32]byte
  1730  
  1731  	// Amt is the amount of milli-atoms this HTLC escrows.
  1732  	Amt lnwire.MilliAtom
  1733  
  1734  	// RefundTimeout is the absolute timeout on the HTLC that the sender
  1735  	// must wait before reclaiming the funds in limbo.
  1736  	RefundTimeout uint32
  1737  
  1738  	// OutputIndex is the output index for this particular HTLC output
  1739  	// within the commitment transaction.
  1740  	OutputIndex int32
  1741  
  1742  	// Incoming denotes whether we're the receiver or the sender of this
  1743  	// HTLC.
  1744  	Incoming bool
  1745  
  1746  	// OnionBlob is an opaque blob which is used to complete multi-hop
  1747  	// routing.
  1748  	OnionBlob []byte
  1749  
  1750  	// HtlcIndex is the HTLC counter index of this active, outstanding
  1751  	// HTLC. This differs from the LogIndex, as the HtlcIndex is only
  1752  	// incremented for each offered HTLC, while the LogIndex is
  1753  	// incremented for each update (including settles and fails).
  1754  	HtlcIndex uint64
  1755  
  1756  	// LogIndex is the cumulative log index of this HTLC. This differs
  1757  	// from the HtlcIndex as this will be incremented for each new log
  1758  	// update added.
  1759  	LogIndex uint64
  1760  }
  1761  
  1762  // SerializeHtlcs writes out the passed set of HTLC's into the passed writer
  1763  // using the current default on-disk serialization format.
  1764  //
  1765  // NOTE: This API is NOT stable, the on-disk format will likely change in the
  1766  // future.
  1767  func SerializeHtlcs(b io.Writer, htlcs ...HTLC) error {
  1768  	numHtlcs := uint16(len(htlcs))
  1769  	if err := WriteElement(b, numHtlcs); err != nil {
  1770  		return err
  1771  	}
  1772  
  1773  	for _, htlc := range htlcs {
  1774  		if err := WriteElements(b,
  1775  			htlc.Signature, htlc.RHash, htlc.Amt, htlc.RefundTimeout,
  1776  			htlc.OutputIndex, htlc.Incoming, htlc.OnionBlob,
  1777  			htlc.HtlcIndex, htlc.LogIndex,
  1778  		); err != nil {
  1779  			return err
  1780  		}
  1781  	}
  1782  
  1783  	return nil
  1784  }
  1785  
  1786  // DeserializeHtlcs attempts to read out a slice of HTLC's from the passed
  1787  // io.Reader. The bytes within the passed reader MUST have been previously
  1788  // written to using the SerializeHtlcs function.
  1789  //
  1790  // NOTE: This API is NOT stable, the on-disk format will likely change in the
  1791  // future.
  1792  func DeserializeHtlcs(r io.Reader) ([]HTLC, error) {
  1793  	var numHtlcs uint16
  1794  	if err := ReadElement(r, &numHtlcs); err != nil {
  1795  		return nil, err
  1796  	}
  1797  
  1798  	var htlcs []HTLC
  1799  	if numHtlcs == 0 {
  1800  		return htlcs, nil
  1801  	}
  1802  
  1803  	htlcs = make([]HTLC, numHtlcs)
  1804  	for i := uint16(0); i < numHtlcs; i++ {
  1805  		if err := ReadElements(r,
  1806  			&htlcs[i].Signature, &htlcs[i].RHash, &htlcs[i].Amt,
  1807  			&htlcs[i].RefundTimeout, &htlcs[i].OutputIndex,
  1808  			&htlcs[i].Incoming, &htlcs[i].OnionBlob,
  1809  			&htlcs[i].HtlcIndex, &htlcs[i].LogIndex,
  1810  		); err != nil {
  1811  			return htlcs, err
  1812  		}
  1813  	}
  1814  
  1815  	return htlcs, nil
  1816  }
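
        // SerializeHtlcs and DeserializeHtlcs round-trip through any
        // io.Writer/io.Reader pair. A minimal sketch using a bytes.Buffer,
        // with `htlcs` assumed to be an existing []HTLC:
        //
        //	var buf bytes.Buffer
        //	if err := SerializeHtlcs(&buf, htlcs...); err != nil {
        //		return err
        //	}
        //	decoded, err := DeserializeHtlcs(&buf)
        //	if err != nil {
        //		return err
        //	}
        //	// decoded now mirrors htlcs, field for serialized field.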
  1817  
  1818  // Copy returns a full copy of the target HTLC.
  1819  func (h *HTLC) Copy() HTLC {
  1820  	clone := HTLC{
  1821  		Incoming:      h.Incoming,
  1822  		Amt:           h.Amt,
  1823  		RefundTimeout: h.RefundTimeout,
  1824  		OutputIndex:   h.OutputIndex,
  1825  	}
  1826  	clone.Signature = append([]byte(nil), h.Signature...)
  1827  	copy(clone.RHash[:], h.RHash[:])
  1828  
  1829  	return clone
  1830  }
  1831  
  1832  // LogUpdate represents a pending update to the remote commitment chain. The
  1833  // log update may be an add, fail, or settle entry. We maintain this data in
  1834  // order to be able to properly retransmit our proposed state if necessary.
  1835  type LogUpdate struct {
  1836  	// LogIndex is the log index of this proposed commitment update entry.
  1837  	LogIndex uint64
  1838  
  1839  	// UpdateMsg is the update message that was included within our
  1840  	// local update log. The LogIndex value denotes the log index of this
  1841  	// update which will be used when restoring our local update log if
  1842  	// we're left with a dangling update on restart.
  1843  	UpdateMsg lnwire.Message
  1844  }
  1845  
  1846  // serializeLogUpdate writes a log update to the provided io.Writer.
  1847  func serializeLogUpdate(w io.Writer, l *LogUpdate) error {
  1848  	return WriteElements(w, l.LogIndex, l.UpdateMsg)
  1849  }
  1850  
  1851  // deserializeLogUpdate reads a log update from the provided io.Reader.
  1852  func deserializeLogUpdate(r io.Reader) (*LogUpdate, error) {
  1853  	l := &LogUpdate{}
  1854  	if err := ReadElements(r, &l.LogIndex, &l.UpdateMsg); err != nil {
  1855  		return nil, err
  1856  	}
  1857  
  1858  	return l, nil
  1859  }
  1860  
  1861  // CircuitKey is used by a channel to uniquely identify the HTLCs it receives
  1862  // from the switch, and is used to purge our in-memory state of HTLCs that have
  1863  // already been processed by a link. Two lists of CircuitKeys are included in
  1864  // each CommitDiff to allow a link to determine which in-memory htlcs directed
  1865  // the opening and closing of circuits in the switch's circuit map.
  1866  type CircuitKey struct {
  1867  	// ChanID is the short chanid indicating the HTLC's origin.
  1868  	//
  1869  	// NOTE: It is fine for this value to be blank, as this indicates a
  1870  	// locally-sourced payment.
  1871  	ChanID lnwire.ShortChannelID
  1872  
  1873  	// HtlcID is the unique htlc index predominantly assigned by links,
  1874  	// though it can also be assigned by the switch in the case of
  1875  	// locally-sourced payments.
  1876  	HtlcID uint64
  1877  }
  1878  
  1879  // SetBytes deserializes the given bytes into this CircuitKey.
  1880  func (k *CircuitKey) SetBytes(bs []byte) error {
  1881  	if len(bs) != 16 {
  1882  		return ErrInvalidCircuitKeyLen
  1883  	}
  1884  
  1885  	k.ChanID = lnwire.NewShortChanIDFromInt(
  1886  		binary.BigEndian.Uint64(bs[:8]))
  1887  	k.HtlcID = binary.BigEndian.Uint64(bs[8:])
  1888  
  1889  	return nil
  1890  }
  1891  
  1892  // Bytes returns the serialized bytes for this circuit key.
  1893  func (k CircuitKey) Bytes() []byte {
  1894  	var bs = make([]byte, 16)
  1895  	binary.BigEndian.PutUint64(bs[:8], k.ChanID.ToUint64())
  1896  	binary.BigEndian.PutUint64(bs[8:], k.HtlcID)
  1897  	return bs
  1898  }
  1899  
  1900  // Encode writes a CircuitKey to the provided io.Writer.
  1901  func (k *CircuitKey) Encode(w io.Writer) error {
  1902  	var scratch [16]byte
  1903  	binary.BigEndian.PutUint64(scratch[:8], k.ChanID.ToUint64())
  1904  	binary.BigEndian.PutUint64(scratch[8:], k.HtlcID)
  1905  
  1906  	_, err := w.Write(scratch[:])
  1907  	return err
  1908  }
  1909  
  1910  // Decode reads a CircuitKey from the provided io.Reader.
  1911  func (k *CircuitKey) Decode(r io.Reader) error {
  1912  	var scratch [16]byte
  1913  
  1914  	if _, err := io.ReadFull(r, scratch[:]); err != nil {
  1915  		return err
  1916  	}
  1917  	k.ChanID = lnwire.NewShortChanIDFromInt(
  1918  		binary.BigEndian.Uint64(scratch[:8]))
  1919  	k.HtlcID = binary.BigEndian.Uint64(scratch[8:])
  1920  
  1921  	return nil
  1922  }
  1923  
  1924  // String returns a string representation of the CircuitKey.
  1925  func (k CircuitKey) String() string {
  1926  	return fmt.Sprintf("(Chan ID=%s, HTLC ID=%d)", k.ChanID, k.HtlcID)
  1927  }
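
        // A CircuitKey round-trips losslessly through its fixed 16-byte
        // encoding (8 bytes of ChanID followed by 8 bytes of HtlcID). Sketch
        // with arbitrary example values:
        //
        //	key := CircuitKey{
        //		ChanID: lnwire.NewShortChanIDFromInt(42),
        //		HtlcID: 7,
        //	}
        //	var decoded CircuitKey
        //	if err := decoded.SetBytes(key.Bytes()); err != nil {
        //		return err
        //	}
        //	// decoded == key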
  1928  
  1929  // CommitDiff represents the delta needed to apply the state transition between
  1930  // two subsequent commitment states. Given state N and state N+1, one is able
  1931  // to apply the set of messages contained within the CommitDiff to N to arrive
  1932  // at state N+1. Each time a new commitment is extended, we'll write a new
  1933  // commitment (along with the full commitment state) to disk so we can
  1934  // re-transmit the state in the case of a connection loss or message drop.
  1935  type CommitDiff struct {
  1936  	// Commitment is the full commitment state that one would arrive
  1937  	// at by applying the set of messages contained in LogUpdates to
  1938  	// the prior accepted commitment.
  1939  	Commitment ChannelCommitment
  1940  
  1941  	// LogUpdates is the set of messages sent prior to the commitment state
  1942  	// transition in question. Upon reconnection, if we detect that they
  1943  	// don't have the commitment, then we re-send this along with the
  1944  	// proper signature.
  1945  	LogUpdates []LogUpdate
  1946  
  1947  	// CommitSig is the exact CommitSig message that should be sent after
  1948  	// the set of LogUpdates above has been retransmitted. The signatures
  1949  	// within this message should properly cover the new commitment state
  1950  	// and also the HTLC's within the new commitment state.
  1951  	CommitSig *lnwire.CommitSig
  1952  
  1953  	// OpenedCircuitKeys is a set of unique identifiers for any downstream
  1954  	// Add packets included in this commitment txn. After a restart, this
  1955  	// set of htlcs is acked from the link's incoming mailbox to ensure
  1956  	// there isn't an attempt to re-add them to this commitment txn.
  1957  	OpenedCircuitKeys []CircuitKey
  1958  
  1959  	// ClosedCircuitKeys records the unique identifiers for any settle/fail
  1960  	// packets that were resolved by this commitment txn. After a restart,
  1961  	// this is used to ensure those circuits are removed from the circuit
  1962  	// map, and the downstream packets in the link's mailbox are removed.
  1963  	ClosedCircuitKeys []CircuitKey
  1964  
  1965  	// AddAcks specifies the locations (commit height, pkg index) of any
  1966  	// Adds that were failed/settled in this commit diff. This will ack
  1967  	// entries in *this* channel's forwarding packages.
  1968  	//
  1969  	// NOTE: This value is not serialized, it is used to atomically mark the
  1970  	// resolution of adds, such that they will not be reprocessed after a
  1971  	// restart.
  1972  	AddAcks []AddRef
  1973  
  1974  	// SettleFailAcks specifies the locations (chan id, commit height, pkg
  1975  	// index) of any Settles or Fails that were locked into this commit
  1976  	// diff, and originate from *another* channel, i.e. the outgoing link.
  1977  	//
  1978  	// NOTE: This value is not serialized, it is used to atomically ack
  1979  	// settles and fails from the forwarding packages of other channels,
  1980  	// such that they will not be reforwarded internally after a restart.
  1981  	SettleFailAcks []SettleFailRef
  1982  }
  1983  
  1984  // serializeLogUpdates serializes the provided list of updates to a stream.
  1985  func serializeLogUpdates(w io.Writer, logUpdates []LogUpdate) error {
  1986  	numUpdates := uint16(len(logUpdates))
  1987  	if err := binary.Write(w, byteOrder, numUpdates); err != nil {
  1988  		return err
  1989  	}
  1990  
  1991  	for _, diff := range logUpdates {
  1992  		err := WriteElements(w, diff.LogIndex, diff.UpdateMsg)
  1993  		if err != nil {
  1994  			return err
  1995  		}
  1996  	}
  1997  
  1998  	return nil
  1999  }
  2000  
  2001  // deserializeLogUpdates deserializes a list of updates from a stream.
  2002  func deserializeLogUpdates(r io.Reader) ([]LogUpdate, error) {
  2003  	var numUpdates uint16
  2004  	if err := binary.Read(r, byteOrder, &numUpdates); err != nil {
  2005  		return nil, err
  2006  	}
  2007  
  2008  	logUpdates := make([]LogUpdate, numUpdates)
  2009  	for i := 0; i < int(numUpdates); i++ {
  2010  		err := ReadElements(r,
  2011  			&logUpdates[i].LogIndex, &logUpdates[i].UpdateMsg,
  2012  		)
  2013  		if err != nil {
  2014  			return nil, err
  2015  		}
  2016  	}
  2017  	return logUpdates, nil
  2018  }
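
        // Both helpers above use a uint16 length prefix, so a single stream
        // can hold at most 65535 log updates. Round-trip sketch, with
        // `updates` assumed to be an existing []LogUpdate:
        //
        //	var buf bytes.Buffer
        //	if err := serializeLogUpdates(&buf, updates); err != nil {
        //		return err
        //	}
        //	decoded, err := deserializeLogUpdates(&buf)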
  2019  
  2020  func serializeCommitDiff(w io.Writer, diff *CommitDiff) error { // nolint: dupl
  2021  	if err := serializeChanCommit(w, &diff.Commitment); err != nil {
  2022  		return err
  2023  	}
  2024  
  2025  	if err := WriteElements(w, diff.CommitSig); err != nil {
  2026  		return err
  2027  	}
  2028  
  2029  	if err := serializeLogUpdates(w, diff.LogUpdates); err != nil {
  2030  		return err
  2031  	}
  2032  
  2033  	numOpenRefs := uint16(len(diff.OpenedCircuitKeys))
  2034  	if err := binary.Write(w, byteOrder, numOpenRefs); err != nil {
  2035  		return err
  2036  	}
  2037  
  2038  	for _, openRef := range diff.OpenedCircuitKeys {
  2039  		err := WriteElements(w, openRef.ChanID, openRef.HtlcID)
  2040  		if err != nil {
  2041  			return err
  2042  		}
  2043  	}
  2044  
  2045  	numClosedRefs := uint16(len(diff.ClosedCircuitKeys))
  2046  	if err := binary.Write(w, byteOrder, numClosedRefs); err != nil {
  2047  		return err
  2048  	}
  2049  
  2050  	for _, closedRef := range diff.ClosedCircuitKeys {
  2051  		err := WriteElements(w, closedRef.ChanID, closedRef.HtlcID)
  2052  		if err != nil {
  2053  			return err
  2054  		}
  2055  	}
  2056  
  2057  	return nil
  2058  }
  2059  
  2060  func deserializeCommitDiff(r io.Reader) (*CommitDiff, error) {
  2061  	var (
  2062  		d   CommitDiff
  2063  		err error
  2064  	)
  2065  
  2066  	d.Commitment, err = deserializeChanCommit(r)
  2067  	if err != nil {
  2068  		return nil, err
  2069  	}
  2070  
  2071  	var msg lnwire.Message
  2072  	if err := ReadElements(r, &msg); err != nil {
  2073  		return nil, err
  2074  	}
  2075  	commitSig, ok := msg.(*lnwire.CommitSig)
  2076  	if !ok {
  2077  		return nil, fmt.Errorf("expected lnwire.CommitSig, instead "+
  2078  			"read: %T", msg)
  2079  	}
  2080  	d.CommitSig = commitSig
  2081  
  2082  	d.LogUpdates, err = deserializeLogUpdates(r)
  2083  	if err != nil {
  2084  		return nil, err
  2085  	}
  2086  
  2087  	var numOpenRefs uint16
  2088  	if err := binary.Read(r, byteOrder, &numOpenRefs); err != nil {
  2089  		return nil, err
  2090  	}
  2091  
  2092  	d.OpenedCircuitKeys = make([]CircuitKey, numOpenRefs)
  2093  	for i := 0; i < int(numOpenRefs); i++ {
  2094  		err := ReadElements(r,
  2095  			&d.OpenedCircuitKeys[i].ChanID,
  2096  			&d.OpenedCircuitKeys[i].HtlcID)
  2097  		if err != nil {
  2098  			return nil, err
  2099  		}
  2100  	}
  2101  
  2102  	var numClosedRefs uint16
  2103  	if err := binary.Read(r, byteOrder, &numClosedRefs); err != nil {
  2104  		return nil, err
  2105  	}
  2106  
  2107  	d.ClosedCircuitKeys = make([]CircuitKey, numClosedRefs)
  2108  	for i := 0; i < int(numClosedRefs); i++ {
  2109  		err := ReadElements(r,
  2110  			&d.ClosedCircuitKeys[i].ChanID,
  2111  			&d.ClosedCircuitKeys[i].HtlcID)
  2112  		if err != nil {
  2113  			return nil, err
  2114  		}
  2115  	}
  2116  
  2117  	return &d, nil
  2118  }
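
        // A CommitDiff round-trips the same way, with the caveat (noted on the
        // struct) that AddAcks and SettleFailAcks are not serialized. Hedged
        // sketch; `diff` is an assumed, populated *CommitDiff:
        //
        //	var buf bytes.Buffer
        //	if err := serializeCommitDiff(&buf, diff); err != nil {
        //		return err
        //	}
        //	decoded, err := deserializeCommitDiff(&buf)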
  2119  
  2120  // AppendRemoteCommitChain appends a new CommitDiff to the end of the
  2121  // commitment chain for the remote party. This method is to be used once we
  2122  // have prepared a new commitment state for the remote party, but before we
  2123  // transmit it to the remote party. The contents of the argument should be
  2124  // sufficient to retransmit the updates and signature needed to reconstruct the
  2125  // state in full, in the case that we need to retransmit.
  2126  func (c *OpenChannel) AppendRemoteCommitChain(diff *CommitDiff) error {
  2127  	c.Lock()
  2128  	defer c.Unlock()
  2129  
  2130  	// If this is a restored channel, then we want to avoid mutating the
  2131  	// state at all, as it's impossible to do so in a protocol compliant
  2132  	// manner.
  2133  	if c.hasChanStatus(ChanStatusRestored) {
  2134  		return ErrNoRestoredChannelMutation
  2135  	}
  2136  
  2137  	return kvdb.Update(c.Db.backend, func(tx kvdb.RwTx) error {
  2138  		// First, we'll grab the writable bucket where this channel's
  2139  		// data resides.
  2140  		chanBucket, err := fetchChanBucketRw(
  2141  			tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
  2142  		)
  2143  		if err != nil {
  2144  			return err
  2145  		}
  2146  
  2147  		// If the channel is marked as borked, then for safety reasons,
  2148  		// we shouldn't attempt any further updates.
  2149  		isBorked, err := c.isBorked(chanBucket)
  2150  		if err != nil {
  2151  			return err
  2152  		}
  2153  		if isBorked {
  2154  			return ErrChanBorked
  2155  		}
  2156  
  2157  		// Any outgoing settles and fails necessarily have
  2158  		// corresponding adds in this channel's forwarding packages.
  2159  		// Mark all of these as being fully processed in our forwarding
  2160  		// package, which prevents us from reprocessing them after
  2161  		// startup.
  2162  		err = c.Packager.AckAddHtlcs(tx, diff.AddAcks...)
  2163  		if err != nil {
  2164  			return err
  2165  		}
  2166  
  2167  		// Additionally, we ack any fails or settles that are
  2168  		// persisted in another channel's forwarding package. This
  2169  		// prevents the same fails and settles from being retransmitted
  2170  		// after restarts. The actual fail or settle we need to
  2171  		// propagate to the remote party is now in the commit diff.
  2172  		err = c.Packager.AckSettleFails(tx, diff.SettleFailAcks...)
  2173  		if err != nil {
  2174  			return err
  2175  		}
  2176  
  2177  		// We are sending a commitment signature so lastWasRevokeKey should
  2178  		// store false.
  2179  		var b bytes.Buffer
  2180  		if err := WriteElements(&b, false); err != nil {
  2181  			return err
  2182  		}
  2183  		if err := chanBucket.Put(lastWasRevokeKey, b.Bytes()); err != nil {
  2184  			return err
  2185  		}
  2186  
  2187  		// TODO(roasbeef): use seqno to derive key for later LCP
  2188  
  2189  		// With the bucket retrieved, we'll now serialize the commit
  2190  		// diff itself, and write it to disk.
  2191  		var b2 bytes.Buffer
  2192  		if err := serializeCommitDiff(&b2, diff); err != nil {
  2193  			return err
  2194  		}
  2195  		return chanBucket.Put(commitDiffKey, b2.Bytes())
  2196  	}, func() {})
  2197  }
  2198  
  2199  // RemoteCommitChainTip returns the "tip" of the current remote commitment
  2200  // chain. This value will be non-nil iff we've created a new commitment for
  2201  // the remote party that they haven't yet ACK'd. In this case, their commitment
  2202  // chain will have a length of two: their current unrevoked commitment, and
  2203  // this new pending commitment. Once they revoke their prior state, we'll swap
  2204  // these pointers, causing the tip and the tail to point to the same entry.
  2205  func (c *OpenChannel) RemoteCommitChainTip() (*CommitDiff, error) {
  2206  	var cd *CommitDiff
  2207  	err := kvdb.View(c.Db.backend, func(tx kvdb.RTx) error {
  2208  		chanBucket, err := fetchChanBucket(
  2209  			tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
  2210  		)
  2211  		switch err {
  2212  		case nil:
  2213  		case ErrNoChanDBExists, ErrNoActiveChannels, ErrChannelNotFound:
  2214  			return ErrNoPendingCommit
  2215  		default:
  2216  			return err
  2217  		}
  2218  
  2219  		tipBytes := chanBucket.Get(commitDiffKey)
  2220  		if tipBytes == nil {
  2221  			return ErrNoPendingCommit
  2222  		}
  2223  
  2224  		tipReader := bytes.NewReader(tipBytes)
  2225  		dcd, err := deserializeCommitDiff(tipReader)
  2226  		if err != nil {
  2227  			return err
  2228  		}
  2229  
  2230  		cd = dcd
  2231  		return nil
  2232  	}, func() {
  2233  		cd = nil
  2234  	})
  2235  	if err != nil {
  2236  		return nil, err
  2237  	}
  2238  
  2239  	return cd, nil
  2240  }
  2241  
  2242  // UnsignedAckedUpdates retrieves the persisted unsigned acked remote log
  2243  // updates that still need to be signed for.
  2244  func (c *OpenChannel) UnsignedAckedUpdates() ([]LogUpdate, error) {
  2245  	var updates []LogUpdate
  2246  	err := kvdb.View(c.Db.backend, func(tx kvdb.RTx) error {
  2247  		chanBucket, err := fetchChanBucket(
  2248  			tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
  2249  		)
  2250  		switch err {
  2251  		case nil:
  2252  		case ErrNoChanDBExists, ErrNoActiveChannels, ErrChannelNotFound:
  2253  			return nil
  2254  		default:
  2255  			return err
  2256  		}
  2257  
  2258  		updateBytes := chanBucket.Get(unsignedAckedUpdatesKey)
  2259  		if updateBytes == nil {
  2260  			return nil
  2261  		}
  2262  
  2263  		r := bytes.NewReader(updateBytes)
  2264  		updates, err = deserializeLogUpdates(r)
  2265  		return err
  2266  	}, func() {
  2267  		updates = nil
  2268  	})
  2269  	if err != nil {
  2270  		return nil, err
  2271  	}
  2272  
  2273  	return updates, nil
  2274  }
  2275  
  2276  // RemoteUnsignedLocalUpdates retrieves the persisted, unsigned local log
  2277  // updates that the remote still needs to sign for.
  2278  func (c *OpenChannel) RemoteUnsignedLocalUpdates() ([]LogUpdate, error) {
  2279  	var updates []LogUpdate
  2280  	err := kvdb.View(c.Db.backend, func(tx kvdb.RTx) error {
  2281  		chanBucket, err := fetchChanBucket(
  2282  			tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
  2283  		)
  2284  		switch err {
  2285  		case nil:
  2287  		case ErrNoChanDBExists, ErrNoActiveChannels, ErrChannelNotFound:
  2288  			return nil
  2289  		default:
  2290  			return err
  2291  		}
  2292  
  2293  		updateBytes := chanBucket.Get(remoteUnsignedLocalUpdatesKey)
  2294  		if updateBytes == nil {
  2295  			return nil
  2296  		}
  2297  
  2298  		r := bytes.NewReader(updateBytes)
  2299  		updates, err = deserializeLogUpdates(r)
  2300  		return err
  2301  	}, func() {
  2302  		updates = nil
  2303  	})
  2304  	if err != nil {
  2305  		return nil, err
  2306  	}
  2307  
  2308  	return updates, nil
  2309  }
  2310  
  2311  // InsertNextRevocation inserts the _next_ commitment point (revocation) into
  2312  // the database, and also modifies the internal RemoteNextRevocation attribute
  2313  // to point to the passed key. This method is to be used during final channel
  2314  // setup, _after_ the channel has been fully confirmed.
  2315  //
  2316  // NOTE: If this method isn't called, then the target channel won't be able to
  2317  // propose new states for the commitment state of the remote party.
  2318  func (c *OpenChannel) InsertNextRevocation(revKey *secp256k1.PublicKey) error {
  2319  	c.Lock()
  2320  	defer c.Unlock()
  2321  
  2322  	c.RemoteNextRevocation = revKey
  2323  
  2324  	err := kvdb.Update(c.Db.backend, func(tx kvdb.RwTx) error {
  2325  		chanBucket, err := fetchChanBucketRw(
  2326  			tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
  2327  		)
  2328  		if err != nil {
  2329  			return err
  2330  		}
  2331  
  2332  		return putChanRevocationState(chanBucket, c)
  2333  	}, func() {})
  2334  	if err != nil {
  2335  		return err
  2336  	}
  2337  
  2338  	return nil
  2339  }
  2340  
  2341  // AdvanceCommitChainTail records the new state transition within an on-disk
  2342  // append-only log which records all state transitions by the remote peer. In
  2343  // the case of an uncooperative broadcast of a prior state by the remote peer,
  2344  // this log can be consulted in order to reconstruct the state needed to
  2345  // rectify the situation. This method will add the current commitment for the
  2346  // remote party to the revocation log, and promote the current pending
  2347  // commitment to the current remote commitment. The updates parameter is the
  2348  // set of local updates that the peer still needs to send us a signature for.
  2349  // We store this set of updates in case we go down.
  2350  func (c *OpenChannel) AdvanceCommitChainTail(fwdPkg *FwdPkg,
  2351  	updates []LogUpdate) error {
  2352  
  2353  	c.Lock()
  2354  	defer c.Unlock()
  2355  
  2356  	// If this is a restored channel, then we want to avoid mutating the
  2357  	// state at all, as it's impossible to do so in a protocol compliant
  2358  	// manner.
  2359  	if c.hasChanStatus(ChanStatusRestored) {
  2360  		return ErrNoRestoredChannelMutation
  2361  	}
  2362  
  2363  	var newRemoteCommit *ChannelCommitment
  2364  
  2365  	err := kvdb.Update(c.Db.backend, func(tx kvdb.RwTx) error {
  2366  		chanBucket, err := fetchChanBucketRw(
  2367  			tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
  2368  		)
  2369  		if err != nil {
  2370  			return err
  2371  		}
  2372  
  2373  		// If the channel is marked as borked, then for safety reasons,
  2374  		// we shouldn't attempt any further updates.
  2375  		isBorked, err := c.isBorked(chanBucket)
  2376  		if err != nil {
  2377  			return err
  2378  		}
  2379  		if isBorked {
  2380  			return ErrChanBorked
  2381  		}
  2382  
  2383  		// Persist the latest preimage state to disk as the remote peer
  2384  		// has just added to our local preimage store, and given us a
  2385  		// new pending revocation key.
  2386  		if err := putChanRevocationState(chanBucket, c); err != nil {
  2387  			return err
  2388  		}
  2389  
  2390  		// With the current preimage producer/store state updated,
  2391  		// append a new log entry recording the delta of this
  2392  		// state transition.
  2393  		//
  2394  		// TODO(roasbeef): could make the deltas relative, would save
  2395  		// space, but then tradeoff for more disk-seeks to recover the
  2396  		// full state.
  2397  		logKey := revocationLogBucket
  2398  		logBucket, err := chanBucket.CreateBucketIfNotExists(logKey)
  2399  		if err != nil {
  2400  			return err
  2401  		}
  2402  
  2403  		// Before we append this revoked state to the revocation log,
  2404  		// we'll swap out what's currently the tail of the commit tip,
  2405  		// with the current locked-in commitment for the remote party.
  2406  		tipBytes := chanBucket.Get(commitDiffKey)
  2407  		tipReader := bytes.NewReader(tipBytes)
  2408  		newCommit, err := deserializeCommitDiff(tipReader)
  2409  		if err != nil {
  2410  			return err
  2411  		}
  2412  		err = putChanCommitment(
  2413  			chanBucket, &newCommit.Commitment, false,
  2414  		)
  2415  		if err != nil {
  2416  			return err
  2417  		}
  2418  		if err := chanBucket.Delete(commitDiffKey); err != nil {
  2419  			return err
  2420  		}
  2421  
  2422  		// With the commitment pointer swapped, we can now add the
  2423  		// revoked (prior) state to the revocation log.
  2424  		//
  2425  		// TODO(roasbeef): store less
  2426  		err = appendChannelLogEntry(logBucket, &c.RemoteCommitment)
  2427  		if err != nil {
  2428  			return err
  2429  		}
  2430  
  2431  		// Lastly, we write the forwarding package to disk so that we
  2432  		// can properly recover from failures and reforward HTLCs that
  2433  		// have not received a corresponding settle/fail.
  2434  		if err := c.Packager.AddFwdPkg(tx, fwdPkg); err != nil {
  2435  			return err
  2436  		}
  2437  
  2438  		// Persist the unsigned acked updates that are not included
  2439  		// in their new commitment.
  2440  		updateBytes := chanBucket.Get(unsignedAckedUpdatesKey)
  2441  		if updateBytes == nil {
  2442  			// This shouldn't normally happen as we always store
  2443  			// the number of updates, but could still be
  2444  			// encountered by nodes that are upgrading.
  2445  			newRemoteCommit = &newCommit.Commitment
  2446  			return nil
  2447  		}
  2448  
  2449  		r := bytes.NewReader(updateBytes)
  2450  		unsignedUpdates, err := deserializeLogUpdates(r)
  2451  		if err != nil {
  2452  			return err
  2453  		}
  2454  
  2455  		var validUpdates []LogUpdate
  2456  		for _, upd := range unsignedUpdates {
  2457  			lIdx := upd.LogIndex
  2458  
  2459  			// Filter for updates that are not on the remote
  2460  			// commitment.
  2461  			if lIdx >= newCommit.Commitment.RemoteLogIndex {
  2462  				validUpdates = append(validUpdates, upd)
  2463  			}
  2464  		}
  2465  
  2466  		var b bytes.Buffer
  2467  		err = serializeLogUpdates(&b, validUpdates)
  2468  		if err != nil {
  2469  			return fmt.Errorf("unable to serialize log updates: %v", err)
  2470  		}
  2471  
  2472  		err = chanBucket.Put(unsignedAckedUpdatesKey, b.Bytes())
  2473  		if err != nil {
  2474  			return fmt.Errorf("unable to store under unsignedAckedUpdatesKey: %v", err)
  2475  		}
  2476  
  2477  		// Persist the local updates the peer hasn't yet signed so they
  2478  		// can be restored after restart.
  2479  		var b2 bytes.Buffer
  2480  		err = serializeLogUpdates(&b2, updates)
  2481  		if err != nil {
  2482  			return err
  2483  		}
  2484  
  2485  		err = chanBucket.Put(remoteUnsignedLocalUpdatesKey, b2.Bytes())
  2486  		if err != nil {
  2487  			return fmt.Errorf("unable to store remote unsigned "+
  2488  				"local updates: %v", err)
  2489  		}
  2490  
  2491  		newRemoteCommit = &newCommit.Commitment
  2492  
  2493  		return nil
  2494  	}, func() {
  2495  		newRemoteCommit = nil
  2496  	})
  2497  	if err != nil {
  2498  		return err
  2499  	}
  2500  
  2501  	// With the db transaction complete, we'll swap over the in-memory
  2502  	// pointer of the new remote commitment, which was previously the tip
  2503  	// of the commit chain.
  2504  	c.RemoteCommitment = *newRemoteCommit
  2505  
  2506  	return nil
  2507  }
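
        // Taken together, AppendRemoteCommitChain, RemoteCommitChainTip, and
        // AdvanceCommitChainTail form a small state machine for the remote
        // commitment chain. A hedged sketch of the happy path; all variables
        // are assumed inputs:
        //
        //	// 1. Sign a new remote commitment and persist it as the chain tip.
        //	if err := channel.AppendRemoteCommitChain(diff); err != nil {
        //		return err
        //	}
        //	// 2. On receiving the peer's revocation, promote the tip to the
        //	// current remote commitment and log the revoked state.
        //	if err := channel.AdvanceCommitChainTail(fwdPkg, updates); err != nil {
        //		return err
        //	}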
  2508  
  2509  // NextLocalHtlcIndex returns the next unallocated local htlc index. To ensure
  2510  // this always returns the next index that has not yet been allocated, this
  2511  // will first try to examine any pending commitments, before falling back to the
  2512  // last locked-in remote commitment.
  2513  func (c *OpenChannel) NextLocalHtlcIndex() (uint64, error) {
  2514  	// First, load the most recent commit diff that we initiated for the
  2515  	// remote party. If no pending commit is found, this is not treated as
  2516  	// a critical error, since we can always fall back.
  2517  	pendingRemoteCommit, err := c.RemoteCommitChainTip()
  2518  	if err != nil && err != ErrNoPendingCommit {
  2519  		return 0, err
  2520  	}
  2521  
  2522  	// If a pending commit was found, its local htlc index will be at least
  2523  	// as large as the one on our local commitment.
  2524  	if pendingRemoteCommit != nil {
  2525  		return pendingRemoteCommit.Commitment.LocalHtlcIndex, nil
  2526  	}
  2527  
  2528  	// Otherwise, fallback to using the local htlc index of their commitment.
  2529  	return c.RemoteCommitment.LocalHtlcIndex, nil
  2530  }
  2531  
  2532  // LoadFwdPkgs scans the forwarding log for any packages that haven't been
  2533  // processed, and returns the deserialized forwarding packages, each of which
  2534  // records the remote commitment height at which its updates were locked in.
  2535  func (c *OpenChannel) LoadFwdPkgs() ([]*FwdPkg, error) {
  2536  	c.RLock()
  2537  	defer c.RUnlock()
  2538  
  2539  	var fwdPkgs []*FwdPkg
  2540  	if err := kvdb.View(c.Db.backend, func(tx kvdb.RTx) error {
  2541  		var err error
  2542  		fwdPkgs, err = c.Packager.LoadFwdPkgs(tx)
  2543  		return err
  2544  	}, func() {
  2545  		fwdPkgs = nil
  2546  	}); err != nil {
  2547  		return nil, err
  2548  	}
  2549  
  2550  	return fwdPkgs, nil
  2551  }
  2552  
  2553  // AckAddHtlcs updates the AckAddFilter containing any of the provided AddRefs
  2554  // indicating that a response to this Add has been committed to the remote party.
  2555  // Doing so will prevent these Add HTLCs from being reforwarded internally.
  2556  func (c *OpenChannel) AckAddHtlcs(addRefs ...AddRef) error {
  2557  	c.Lock()
  2558  	defer c.Unlock()
  2559  
  2560  	return kvdb.Update(c.Db.backend, func(tx kvdb.RwTx) error {
  2561  		return c.Packager.AckAddHtlcs(tx, addRefs...)
  2562  	}, func() {})
  2563  }
  2564  
  2565  // AckSettleFails updates the SettleFailFilter containing any of the provided
  2566  // SettleFailRefs, indicating that the response has been delivered to the
  2567  // incoming link, corresponding to a particular AddRef. Doing so will prevent
  2568  // the responses from being retransmitted internally.
  2569  func (c *OpenChannel) AckSettleFails(settleFailRefs ...SettleFailRef) error {
  2570  	c.Lock()
  2571  	defer c.Unlock()
  2572  
  2573  	return kvdb.Update(c.Db.backend, func(tx kvdb.RwTx) error {
  2574  		return c.Packager.AckSettleFails(tx, settleFailRefs...)
  2575  	}, func() {})
  2576  }
  2577  
  2578  // SetFwdFilter atomically sets the forwarding filter for the forwarding package
  2579  // identified by `height`.
  2580  func (c *OpenChannel) SetFwdFilter(height uint64, fwdFilter *PkgFilter) error {
  2581  	c.Lock()
  2582  	defer c.Unlock()
  2583  
  2584  	return kvdb.Update(c.Db.backend, func(tx kvdb.RwTx) error {
  2585  		return c.Packager.SetFwdFilter(tx, height, fwdFilter)
  2586  	}, func() {})
  2587  }
  2588  
  2589  // RemoveFwdPkgs atomically removes forwarding packages specified by the remote
  2590  // commitment heights. If one of the intermediate RemovePkg calls fails, then the
  2591  // later packages won't be removed.
  2592  //
  2593  // NOTE: This method should only be called on packages marked FwdStateCompleted.
  2594  func (c *OpenChannel) RemoveFwdPkgs(heights ...uint64) error {
  2595  	c.Lock()
  2596  	defer c.Unlock()
  2597  
  2598  	return kvdb.Update(c.Db.backend, func(tx kvdb.RwTx) error {
  2599  		for _, height := range heights {
  2600  			err := c.Packager.RemovePkg(tx, height)
  2601  			if err != nil {
  2602  				return err
  2603  			}
  2604  		}
  2605  
  2606  		return nil
  2607  	}, func() {})
  2608  }
  2609  
  2610  // RevocationLogTail returns the "tail", or the end of the current revocation
  2611  // log. This entry represents the previous state of the remote node's
  2612  // commitment chain. The ChannelCommitment returned by this method will always
  2613  // lag one state behind the most current (unrevoked) state of the remote node's
  2614  // commitment chain.
  2615  func (c *OpenChannel) RevocationLogTail() (*ChannelCommitment, error) {
  2616  	c.RLock()
  2617  	defer c.RUnlock()
  2618  
  2619  	// If we haven't created any state updates yet, then we'll exit early as
  2620  	// there's nothing to be found on disk in the revocation bucket.
  2621  	if c.RemoteCommitment.CommitHeight == 0 {
  2622  		return nil, nil
  2623  	}
  2624  
  2625  	var commit ChannelCommitment
  2626  	if err := kvdb.View(c.Db.backend, func(tx kvdb.RTx) error {
  2627  		chanBucket, err := fetchChanBucket(
  2628  			tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
  2629  		)
  2630  		if err != nil {
  2631  			return err
  2632  		}
  2633  
  2634  		logBucket := chanBucket.NestedReadBucket(revocationLogBucket)
  2635  		if logBucket == nil {
  2636  			return ErrNoPastDeltas
  2637  		}
  2638  
  2639  		// Once we have the bucket that stores the revocation log from
  2640  		// this channel, we'll jump to the _last_ key in bucket. As we
  2641  		// store the update number on disk in a big-endian format,
  2642  		// this will retrieve the latest entry.
  2643  		cursor := logBucket.ReadCursor()
  2644  		_, tailLogEntry := cursor.Last()
  2645  		logEntryReader := bytes.NewReader(tailLogEntry)
  2646  
  2647  		// Once we have the entry, we'll decode it into the channel
  2648  		// delta pointer we created above.
  2649  		var dbErr error
  2650  		commit, dbErr = deserializeChanCommit(logEntryReader)
  2651  		if dbErr != nil {
  2652  			return dbErr
  2653  		}
  2654  
  2655  		return nil
  2656  	}, func() {}); err != nil {
  2657  		return nil, err
  2658  	}
  2659  
  2660  	return &commit, nil
  2661  }
  2662  
  2663  // CommitmentHeight returns the current commitment height. The commitment
  2664  // height represents the number of updates to the commitment state to date.
  2665  // This value is always monotonically increasing. This method is provided in
  2666  // order to allow multiple instances of a particular open channel to obtain a
  2667  // consistent view of the number of channel updates to date.
  2668  func (c *OpenChannel) CommitmentHeight() (uint64, error) {
  2669  	c.RLock()
  2670  	defer c.RUnlock()
  2671  
  2672  	var height uint64
  2673  	err := kvdb.View(c.Db.backend, func(tx kvdb.RTx) error {
  2674  		// Get the bucket dedicated to storing the metadata for open
  2675  		// channels.
  2676  		chanBucket, err := fetchChanBucket(
  2677  			tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
  2678  		)
  2679  		if err != nil {
  2680  			return err
  2681  		}
  2682  
  2683  		commit, err := fetchChanCommitment(chanBucket, true)
  2684  		if err != nil {
  2685  			return err
  2686  		}
  2687  
  2688  		height = commit.CommitHeight
  2689  		return nil
  2690  	}, func() {
  2691  		height = 0
  2692  	})
  2693  	if err != nil {
  2694  		return 0, err
  2695  	}
  2696  
  2697  	return height, nil
  2698  }
  2699  
  2700  // FindPreviousState scans through the append-only log in an attempt to recover
  2701  // the previous channel state indicated by the update number. This method is
  2702  // intended to be used for obtaining the relevant data needed to claim all
  2703  // funds rightfully spendable in the case of an on-chain broadcast of the
  2704  // commitment transaction.
  2705  func (c *OpenChannel) FindPreviousState(updateNum uint64) (*ChannelCommitment, error) {
  2706  	c.RLock()
  2707  	defer c.RUnlock()
  2708  
  2709  	var commit ChannelCommitment
  2710  	err := kvdb.View(c.Db.backend, func(tx kvdb.RTx) error {
  2711  		chanBucket, err := fetchChanBucket(
  2712  			tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
  2713  		)
  2714  		if err != nil {
  2715  			return err
  2716  		}
  2717  
  2718  		logBucket := chanBucket.NestedReadBucket(revocationLogBucket)
  2719  		if logBucket == nil {
  2720  			return ErrNoPastDeltas
  2721  		}
  2722  
  2723  		c, err := fetchChannelLogEntry(logBucket, updateNum)
  2724  		if err != nil {
  2725  			return err
  2726  		}
  2727  
  2728  		commit = c
  2729  		return nil
  2730  	}, func() {})
  2731  	if err != nil {
  2732  		return nil, err
  2733  	}
  2734  
  2735  	return &commit, nil
  2736  }
  2737  
  2738  // ClosureType is an enum-like type that details exactly _how_ a channel was
  2739  // closed. Six closure types are currently possible: cooperative, local force
  2740  // close, remote force close, breach, funding canceled, and abandoned.
  2741  type ClosureType uint8
  2742  
  2743  const (
  2744  	// CooperativeClose indicates that a channel has been closed
  2745  	// cooperatively.  This means that both channel peers were online and
  2746  	// signed a new transaction paying out the settled balance of the
  2747  	// contract.
  2748  	CooperativeClose ClosureType = 0
  2749  
  2750  	// LocalForceClose indicates that we have unilaterally broadcast our
  2751  	// current commitment state on-chain.
  2752  	LocalForceClose ClosureType = 1
  2753  
  2754  	// RemoteForceClose indicates that the remote peer has unilaterally
  2755  	// broadcast their current commitment state on-chain.
  2756  	RemoteForceClose ClosureType = 4
  2757  
  2758  	// BreachClose indicates that the remote peer attempted to broadcast a
  2759  	// prior _revoked_ channel state.
  2760  	BreachClose ClosureType = 2
  2761  
  2762  	// FundingCanceled indicates that the channel never was fully opened
  2763  	// before it was marked as closed in the database. This can happen if
  2764  	// we or the remote fail at some point during the opening workflow, or
  2765  	// we timeout waiting for the funding transaction to be confirmed.
  2766  	FundingCanceled ClosureType = 3
  2767  
  2768  	// Abandoned indicates that the channel state was removed without
  2769  	// any further actions. This is intended to clean up unusable
  2770  	// channels during development.
  2771  	Abandoned ClosureType = 5
  2772  )
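
// closureTypeString is an illustrative helper (not part of the original
// source) mapping each ClosureType to a human-readable label. Note that the
// numeric values are not contiguous (RemoteForceClose is 4, BreachClose is
// 2), so a switch is safer than an array lookup.
func closureTypeString(c ClosureType) string {
	switch c {
	case CooperativeClose:
		return "CooperativeClose"
	case LocalForceClose:
		return "LocalForceClose"
	case RemoteForceClose:
		return "RemoteForceClose"
	case BreachClose:
		return "BreachClose"
	case FundingCanceled:
		return "FundingCanceled"
	case Abandoned:
		return "Abandoned"
	default:
		return "Unknown"
	}
}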
  2773  
  2774  // ChannelCloseSummary contains the final state of a channel at the point it
  2775  // was closed. Once a channel is closed, all the information pertaining to that
  2776  // channel within the openChannelBucket is deleted, and a compact summary is
  2777  // put in place instead.
  2778  type ChannelCloseSummary struct {
  2779  	// ChanPoint is the outpoint for this channel's funding transaction,
  2780  	// and is used as a unique identifier for the channel.
  2781  	ChanPoint wire.OutPoint
  2782  
  2783  	// ShortChanID encodes the exact location in the chain in which the
  2784  	// channel was initially confirmed. This includes: the block height,
  2785  	// transaction index, and the output within the target transaction.
  2786  	ShortChanID lnwire.ShortChannelID
  2787  
  2788  	// ChainHash is the hash of the genesis block that this channel resides
  2789  	// within.
  2790  	ChainHash chainhash.Hash
  2791  
  2792  	// ClosingTXID is the txid of the transaction which ultimately closed
  2793  	// this channel.
  2794  	ClosingTXID chainhash.Hash
  2795  
  2796  	// RemotePub is the public key of the remote peer that we formerly had
  2797  	// a channel with.
  2798  	RemotePub *secp256k1.PublicKey
  2799  
  2800  	// Capacity was the total capacity of the channel.
  2801  	Capacity dcrutil.Amount
  2802  
  2803  	// CloseHeight is the height at which the funding transaction was
  2804  	// spent.
  2805  	CloseHeight uint32
  2806  
  2807  	// SettledBalance is our total settled balance at the time of
  2808  	// channel closure. This _does not_ include the sum of any outputs that
  2809  	// have been time-locked as a result of the unilateral channel closure.
  2810  	SettledBalance dcrutil.Amount
  2811  
  2812  	// TimeLockedBalance is the sum of all the time-locked outputs at the
  2813  	// time of channel closure. If we triggered the force closure of this
  2814  	// channel, then this value will be non-zero if our settled output is
  2815  	// above the dust limit. If we were on the receiving side of a channel
  2816  	// force closure, then this value will be non-zero if we had any
  2817  	// outstanding outgoing HTLC's at the time of channel closure.
  2818  	TimeLockedBalance dcrutil.Amount
  2819  
  2820  	// CloseType details exactly _how_ the channel was closed. Six closure
  2821  	// types are possible: cooperative, local force, remote force, breach,
  2822  	// funding canceled, and abandoned.
  2823  	CloseType ClosureType
  2824  
  2825  	// IsPending indicates whether this channel is in the 'pending close'
  2826  	// state, which means the channel closing transaction has been
  2827  	// confirmed, but not yet been fully resolved. In the case of a channel
  2828  	// that has been cooperatively closed, it will go straight into the
  2829  	// fully resolved state as soon as the closing transaction has been
  2830  	// confirmed. However, for channels that have been force closed, they'll
  2831  	// stay marked as "pending" until _all_ the pending funds have been
  2832  	// swept.
  2833  	IsPending bool
  2834  
  2835  	// RemoteCurrentRevocation is the current revocation for their
  2836  	// commitment transaction. However, since this is the derived public key,
  2837  	// we don't yet have the private key so we aren't yet able to verify
  2838  	// that it's actually in the hash chain.
  2839  	RemoteCurrentRevocation *secp256k1.PublicKey
  2840  
  2841  	// RemoteNextRevocation is the revocation key to be used for the *next*
  2842  	// commitment transaction we create for the local node. Within the
  2843  	// specification, this value is referred to as the
  2844  	// per-commitment-point.
  2845  	RemoteNextRevocation *secp256k1.PublicKey
  2846  
  2847  	// LocalChanConfig is the channel configuration for the local node.
  2848  	LocalChanConfig ChannelConfig
  2849  
  2850  	// LastChanSyncMsg is the ChannelReestablish message for this channel
  2851  	// for the state at the point where it was closed.
  2852  	LastChanSyncMsg *lnwire.ChannelReestablish
  2853  }
  2854  
  2855  // CloseChannel closes a previously active Lightning channel. Closing a channel
  2856  // entails deleting all saved state within the database concerning this
  2857  // channel. This method also takes a struct that summarizes the state of the
  2858  // channel at closing; this compact representation will be the only component
  2859  // of a channel left over after a full closing. It takes an optional set of
  2860  // channel statuses which will be written to the historical channel bucket.
  2861  // These statuses are used to record close initiators.
  2862  func (c *OpenChannel) CloseChannel(summary *ChannelCloseSummary,
  2863  	statuses ...ChannelStatus) error {
  2864  
  2865  	c.Lock()
  2866  	defer c.Unlock()
  2867  
  2868  	return kvdb.Update(c.Db.backend, func(tx kvdb.RwTx) error {
  2869  		openChanBucket := tx.ReadWriteBucket(openChannelBucket)
  2870  		if openChanBucket == nil {
  2871  			return ErrNoChanDBExists
  2872  		}
  2873  
  2874  		nodePub := c.IdentityPub.SerializeCompressed()
  2875  		nodeChanBucket := openChanBucket.NestedReadWriteBucket(nodePub)
  2876  		if nodeChanBucket == nil {
  2877  			return ErrNoActiveChannels
  2878  		}
  2879  
  2880  		chainBucket := nodeChanBucket.NestedReadWriteBucket(c.ChainHash[:])
  2881  		if chainBucket == nil {
  2882  			return ErrNoActiveChannels
  2883  		}
  2884  
  2885  		var chanPointBuf bytes.Buffer
  2886  		err := writeOutpoint(&chanPointBuf, &c.FundingOutpoint)
  2887  		if err != nil {
  2888  			return err
  2889  		}
  2890  		chanKey := chanPointBuf.Bytes()
  2891  		chanBucket := chainBucket.NestedReadWriteBucket(
  2892  			chanKey,
  2893  		)
  2894  		if chanBucket == nil {
  2895  			return ErrNoActiveChannels
  2896  		}
  2897  
  2898  		// Before we delete the channel state, we'll read out the full
  2899  		// details, as we'll also store portions of this information
  2900  		// for record keeping.
  2901  		chanState, err := fetchOpenChannel(
  2902  			chanBucket, &c.FundingOutpoint,
  2903  		)
  2904  		if err != nil {
  2905  			return err
  2906  		}
  2907  
  2908  		// Delete all the forwarding packages stored for this particular
  2909  		// channel.
  2910  		if err = chanState.Packager.Wipe(tx); err != nil {
  2911  			return err
  2912  		}
  2913  
  2914  		// With the forwarding packages removed, purge the remaining
  2915  		// channel metadata from the database.
  2916  		err = deleteOpenChannel(chanBucket)
  2917  		if err != nil {
  2918  			return err
  2919  		}
  2920  
  2921  		// We'll also remove the channel from the frozen channel bucket
  2922  		// if we need to.
  2923  		if c.ChanType.IsFrozen() || c.ChanType.HasLeaseExpiration() {
  2924  			err := deleteThawHeight(chanBucket)
  2925  			if err != nil {
  2926  				return err
  2927  			}
  2928  		}
  2929  
  2930  		// With the base channel data deleted, attempt to delete the
  2931  		// information stored within the revocation log.
  2932  		logBucket := chanBucket.NestedReadWriteBucket(revocationLogBucket)
  2933  		if logBucket != nil {
  2934  			err = chanBucket.DeleteNestedBucket(revocationLogBucket)
  2935  			if err != nil {
  2936  				return err
  2937  			}
  2938  		}
  2939  
  2940  		err = chainBucket.DeleteNestedBucket(chanPointBuf.Bytes())
  2941  		if err != nil {
  2942  			return err
  2943  		}
  2944  
  2945  		// Fetch the outpoint bucket to see if the outpoint exists or
  2946  		// not.
  2947  		opBucket := tx.ReadWriteBucket(outpointBucket)
  2948  
  2949  		// Add the closed outpoint to our outpoint index. This should
  2950  		// replace an open outpoint in the index.
  2951  		if opBucket.Get(chanPointBuf.Bytes()) == nil {
  2952  			return ErrMissingIndexEntry
  2953  		}
  2954  
  2955  		status := uint8(outpointClosed)
  2956  
  2957  		// Write the IndexStatus of this outpoint as the first entry in a tlv
  2958  		// stream.
  2959  		statusRecord := tlv.MakePrimitiveRecord(indexStatusType, &status)
  2960  		opStream, err := tlv.NewStream(statusRecord)
  2961  		if err != nil {
  2962  			return err
  2963  		}
  2964  
  2965  		var b bytes.Buffer
  2966  		if err := opStream.Encode(&b); err != nil {
  2967  			return err
  2968  		}
  2969  
  2970  		// Finally add the closed outpoint and tlv stream to the index.
  2971  		if err := opBucket.Put(chanPointBuf.Bytes(), b.Bytes()); err != nil {
  2972  			return err
  2973  		}
  2974  
  2975  		// Add channel state to the historical channel bucket.
  2976  		historicalBucket, err := tx.CreateTopLevelBucket(
  2977  			historicalChannelBucket,
  2978  		)
  2979  		if err != nil {
  2980  			return err
  2981  		}
  2982  
  2983  		historicalChanBucket, err :=
  2984  			historicalBucket.CreateBucketIfNotExists(chanKey)
  2985  		if err != nil {
  2986  			return err
  2987  		}
  2988  
  2989  		// Apply any additional statuses to the channel state.
  2990  		for _, status := range statuses {
  2991  			chanState.chanStatus |= status
  2992  		}
  2993  
  2994  		err = putOpenChannel(historicalChanBucket, chanState)
  2995  		if err != nil {
  2996  			return err
  2997  		}
  2998  
  2999  		// Finally, create a summary of this channel in the closed
  3000  		// channel bucket for this node.
  3001  		return putChannelCloseSummary(
  3002  			tx, chanPointBuf.Bytes(), summary, chanState,
  3003  		)
  3004  	}, func() {})
  3005  }
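
// closeCooperatively is an illustrative sketch (not part of the original
// source; the closing txid and settled balance are hypothetical caller
// inputs): it assembles a minimal ChannelCloseSummary for a cooperative
// close and records that we initiated it. ChanStatusLocalCloseInitiator is
// assumed to be one of the ChannelStatus values used to record close
// initiators, as mentioned in the doc comment above.
func closeCooperatively(channel *OpenChannel, closingTXID chainhash.Hash,
	settled dcrutil.Amount) error {

	summary := &ChannelCloseSummary{
		ChanPoint:      channel.FundingOutpoint,
		ChainHash:      channel.ChainHash,
		ClosingTXID:    closingTXID,
		RemotePub:      channel.IdentityPub,
		Capacity:       channel.Capacity,
		SettledBalance: settled,
		CloseType:      CooperativeClose,
		// Cooperative closes are fully resolved once confirmed.
		IsPending: false,
	}

	return channel.CloseChannel(summary, ChanStatusLocalCloseInitiator)
}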
  3006  
  3007  // ChannelSnapshot is a frozen snapshot of the current channel state. A
  3008  // snapshot is detached from the original channel that generated it, providing
  3009  // read-only access to the current or prior state of an active channel.
  3010  //
  3011  // TODO(roasbeef): remove altogether? pretty much just commitment
  3012  type ChannelSnapshot struct {
  3013  	// RemoteIdentity is the identity public key of the remote node that we
  3014  	// are maintaining the open channel with.
  3015  	RemoteIdentity secp256k1.PublicKey
  3016  
  3017  	// ChannelPoint is the outpoint that created the channel. This output
  3018  	// is found within the funding transaction and uniquely identifies the
  3019  	// channel on the resident chain.
  3020  	ChannelPoint wire.OutPoint
  3021  
  3022  	// ChainHash is the genesis hash of the chain that the channel resides
  3023  	// within.
  3024  	ChainHash chainhash.Hash
  3025  
  3026  	// Capacity is the total capacity of the channel.
  3027  	Capacity dcrutil.Amount
  3028  
  3029  	// TotalMAtomsSent is the total number of milli-atoms we've sent
  3030  	// within this channel.
  3031  	TotalMAtomsSent lnwire.MilliAtom
  3032  
  3033  	// TotalMAtomsReceived is the total number of milli-atoms we've
  3034  	// received within this channel.
  3035  	TotalMAtomsReceived lnwire.MilliAtom
  3036  
  3037  	// ChannelCommitment is the current up-to-date commitment for the
  3038  	// target channel.
  3039  	ChannelCommitment
  3040  }
  3041  
  3042  // Snapshot returns a read-only snapshot of the current channel state. This
  3043  // snapshot includes information concerning the current settled balance within
  3044  // the channel, metadata detailing total flows, and any outstanding HTLCs.
  3045  func (c *OpenChannel) Snapshot() *ChannelSnapshot {
  3046  	c.RLock()
  3047  	defer c.RUnlock()
  3048  
  3049  	localCommit := c.LocalCommitment
  3050  	snapshot := &ChannelSnapshot{
  3051  		RemoteIdentity:      *c.IdentityPub,
  3052  		ChannelPoint:        c.FundingOutpoint,
  3053  		Capacity:            c.Capacity,
  3054  		TotalMAtomsSent:     c.TotalMAtomsSent,
  3055  		TotalMAtomsReceived: c.TotalMAtomsReceived,
  3056  		ChainHash:           c.ChainHash,
  3057  		ChannelCommitment: ChannelCommitment{
  3058  			LocalBalance:  localCommit.LocalBalance,
  3059  			RemoteBalance: localCommit.RemoteBalance,
  3060  			CommitHeight:  localCommit.CommitHeight,
  3061  			CommitFee:     localCommit.CommitFee,
  3062  		},
  3063  	}
  3064  
  3065  	// Copy over the current set of HTLCs to ensure the caller can't mutate
  3066  	// our internal state.
  3067  	snapshot.Htlcs = make([]HTLC, len(localCommit.Htlcs))
  3068  	for i, h := range localCommit.Htlcs {
  3069  		snapshot.Htlcs[i] = h.Copy()
  3070  	}
  3071  
  3072  	return snapshot
  3073  }
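
// settledBalances is an illustrative sketch (not part of the original
// source): because Snapshot copies the HTLC slice, the returned state can be
// read without racing against, or mutating, the live channel state.
func settledBalances(channel *OpenChannel) (lnwire.MilliAtom, lnwire.MilliAtom) {
	snapshot := channel.Snapshot()
	return snapshot.LocalBalance, snapshot.RemoteBalance
}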
  3074  
  3075  // LatestCommitments returns the two latest commitments for both the local and
  3076  // remote party. These commitments are read from disk to ensure that only the
  3077  // latest fully committed state is returned. The first commitment returned is
  3078  // the local commitment, and the second returned is the remote commitment.
  3079  func (c *OpenChannel) LatestCommitments() (*ChannelCommitment, *ChannelCommitment, error) {
  3080  	err := kvdb.View(c.Db.backend, func(tx kvdb.RTx) error {
  3081  		chanBucket, err := fetchChanBucket(
  3082  			tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
  3083  		)
  3084  		if err != nil {
  3085  			return err
  3086  		}
  3087  
  3088  		return fetchChanCommitments(chanBucket, c)
  3089  	}, func() {})
  3090  	if err != nil {
  3091  		return nil, nil, err
  3092  	}
  3093  
  3094  	return &c.LocalCommitment, &c.RemoteCommitment, nil
  3095  }
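
// commitHeights is an illustrative sketch (not part of the original source):
// it reads both parties' latest persisted commitment heights in one call,
// e.g. for logging during channel reestablishment.
func commitHeights(channel *OpenChannel) (uint64, uint64, error) {
	localCommit, remoteCommit, err := channel.LatestCommitments()
	if err != nil {
		return 0, 0, err
	}

	return localCommit.CommitHeight, remoteCommit.CommitHeight, nil
}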
  3096  
  3097  // RemoteRevocationStore returns the most up to date commitment version of the
  3098  // revocation storage tree for the remote party. This method can be used when
  3099  // acting on a possible contract breach to ensure that the caller has the most
  3100  // up to date information required to deliver justice.
  3101  func (c *OpenChannel) RemoteRevocationStore() (shachain.Store, error) {
  3102  	err := kvdb.View(c.Db.backend, func(tx kvdb.RTx) error {
  3103  		chanBucket, err := fetchChanBucket(
  3104  			tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
  3105  		)
  3106  		if err != nil {
  3107  			return err
  3108  		}
  3109  
  3110  		return fetchChanRevocationState(chanBucket, c)
  3111  	}, func() {})
  3112  	if err != nil {
  3113  		return nil, err
  3114  	}
  3115  
  3116  	return c.RevocationStore, nil
  3117  }
  3118  
  3119  // AbsoluteThawHeight determines a frozen channel's absolute thaw height. If
  3120  // the channel is neither frozen nor leased, then 0 is returned.
  3121  func (c *OpenChannel) AbsoluteThawHeight() (uint32, error) {
  3122  	// Only frozen or leased channels have a thaw height.
  3123  	if !c.ChanType.IsFrozen() && !c.ChanType.HasLeaseExpiration() {
  3124  		return 0, nil
  3125  	}
  3126  
  3127  	// If the channel has the frozen bit set and its thaw height is below
  3128  	// the absolute threshold, then it's interpreted as a height relative
  3129  	// to the channel's confirmation height.
  3130  	if c.ChanType.IsFrozen() && c.ThawHeight < AbsoluteThawHeightThreshold {
  3131  		// We'll only know of the channel's short ID once it's
  3132  		// confirmed.
  3133  		if c.IsPending {
  3134  			return 0, errors.New("cannot use relative thaw " +
  3135  				"height for unconfirmed channel")
  3136  		}
  3137  		return c.ShortChannelID.BlockHeight + c.ThawHeight, nil
  3138  	}
  3139  
  3140  	return c.ThawHeight, nil
  3141  }
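
// thawsBeforeHeight is an illustrative sketch (not part of the original
// source). For example, a frozen channel confirmed at block 100000 with
// ThawHeight 144 (below AbsoluteThawHeightThreshold) yields an absolute
// thaw height of 100000 + 144 = 100144, while a ThawHeight of 600000 is
// returned as-is.
func thawsBeforeHeight(channel *OpenChannel, height uint32) (bool, error) {
	thawHeight, err := channel.AbsoluteThawHeight()
	if err != nil {
		return false, err
	}

	return thawHeight < height, nil
}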
  3142  
  3143  func putChannelCloseSummary(tx kvdb.RwTx, chanID []byte,
  3144  	summary *ChannelCloseSummary, lastChanState *OpenChannel) error {
  3145  
  3146  	closedChanBucket, err := tx.CreateTopLevelBucket(closedChannelBucket)
  3147  	if err != nil {
  3148  		return err
  3149  	}
  3150  
  3151  	summary.RemoteCurrentRevocation = lastChanState.RemoteCurrentRevocation
  3152  	summary.RemoteNextRevocation = lastChanState.RemoteNextRevocation
  3153  	summary.LocalChanConfig = lastChanState.LocalChanCfg
  3154  
  3155  	var b bytes.Buffer
  3156  	if err := serializeChannelCloseSummary(&b, summary); err != nil {
  3157  		return err
  3158  	}
  3159  
  3160  	return closedChanBucket.Put(chanID, b.Bytes())
  3161  }
  3162  
  3163  func serializeChannelCloseSummary(w io.Writer, cs *ChannelCloseSummary) error {
  3164  	err := WriteElements(w,
  3165  		cs.ChanPoint, cs.ShortChanID, cs.ChainHash, cs.ClosingTXID,
  3166  		cs.CloseHeight, cs.RemotePub, cs.Capacity, cs.SettledBalance,
  3167  		cs.TimeLockedBalance, cs.CloseType, cs.IsPending,
  3168  	)
  3169  	if err != nil {
  3170  		return err
  3171  	}
  3172  
  3173  	// If this is a close channel summary created before the addition of
  3174  	// the new fields, then we can exit here.
  3175  	if cs.RemoteCurrentRevocation == nil {
  3176  		return WriteElements(w, false)
  3177  	}
  3178  
  3179  	// If fields are present, write a boolean to indicate this, and continue.
  3180  	if err := WriteElements(w, true); err != nil {
  3181  		return err
  3182  	}
  3183  
  3184  	if err := WriteElements(w, cs.RemoteCurrentRevocation); err != nil {
  3185  		return err
  3186  	}
  3187  
  3188  	if err := writeChanConfig(w, &cs.LocalChanConfig); err != nil {
  3189  		return err
  3190  	}
  3191  
  3192  	// The RemoteNextRevocation field is optional, as it's possible for a
  3193  	// channel to be closed before we learn of the next unrevoked
  3194  	// revocation point for the remote party. Write a boolean indicating
  3195  	// whether this field is present or not.
  3196  	if err := WriteElements(w, cs.RemoteNextRevocation != nil); err != nil {
  3197  		return err
  3198  	}
  3199  
  3200  	// Write the field, if present.
  3201  	if cs.RemoteNextRevocation != nil {
  3202  		if err = WriteElements(w, cs.RemoteNextRevocation); err != nil {
  3203  			return err
  3204  		}
  3205  	}
  3206  
  3207  	// Write whether the channel sync message is present.
  3208  	if err := WriteElements(w, cs.LastChanSyncMsg != nil); err != nil {
  3209  		return err
  3210  	}
  3211  
  3212  	// Write the channel sync message, if present.
  3213  	if cs.LastChanSyncMsg != nil {
  3214  		if err := WriteElements(w, cs.LastChanSyncMsg); err != nil {
  3215  			return err
  3216  		}
  3217  	}
  3218  
  3219  	return nil
  3220  }
  3221  
  3222  func deserializeCloseChannelSummary(r io.Reader) (*ChannelCloseSummary, error) {
  3223  	c := &ChannelCloseSummary{}
  3224  
  3225  	err := ReadElements(r,
  3226  		&c.ChanPoint, &c.ShortChanID, &c.ChainHash, &c.ClosingTXID,
  3227  		&c.CloseHeight, &c.RemotePub, &c.Capacity, &c.SettledBalance,
  3228  		&c.TimeLockedBalance, &c.CloseType, &c.IsPending,
  3229  	)
  3230  	if err != nil {
  3231  		return nil, err
  3232  	}
  3233  
  3234  	// We'll now check to see if the channel close summary was encoded with
  3235  	// any of the additional optional fields.
  3236  	var hasNewFields bool
  3237  	err = ReadElements(r, &hasNewFields)
  3238  	if err != nil {
  3239  		return nil, err
  3240  	}
  3241  
  3242  	// If fields are not present, we can return.
  3243  	if !hasNewFields {
  3244  		return c, nil
  3245  	}
  3246  
  3247  	// Otherwise read the new fields.
  3248  	if err := ReadElements(r, &c.RemoteCurrentRevocation); err != nil {
  3249  		return nil, err
  3250  	}
  3251  
  3252  	if err := readChanConfig(r, &c.LocalChanConfig); err != nil {
  3253  		return nil, err
  3254  	}
  3255  
  3256  	// Finally, we'll attempt to read the next unrevoked commitment point
  3257  	// for the remote party. If we closed the channel before receiving a
  3258  	// funding locked message then this might not be present. A boolean
  3259  	// indicating whether the field is present will come first.
  3260  	var hasRemoteNextRevocation bool
  3261  	err = ReadElements(r, &hasRemoteNextRevocation)
  3262  	if err != nil {
  3263  		return nil, err
  3264  	}
  3265  
  3266  	// If this field was written, read it.
  3267  	if hasRemoteNextRevocation {
  3268  		err = ReadElements(r, &c.RemoteNextRevocation)
  3269  		if err != nil {
  3270  			return nil, err
  3271  		}
  3272  	}
  3273  
  3274  	// Check if we have a channel sync message to read.
  3275  	var hasChanSyncMsg bool
  3276  	err = ReadElements(r, &hasChanSyncMsg)
  3277  	if err == io.EOF {
  3278  		return c, nil
  3279  	} else if err != nil {
  3280  		return nil, err
  3281  	}
  3282  
  3283  	// If a chan sync message is present, read it.
  3284  	if hasChanSyncMsg {
  3285  		// We must pass in a reference to an lnwire.Message for the
  3286  		// codec to support it.
  3287  		var msg lnwire.Message
  3288  		if err := ReadElements(r, &msg); err != nil {
  3289  			return nil, err
  3290  		}
  3291  
  3292  		chanSync, ok := msg.(*lnwire.ChannelReestablish)
  3293  		if !ok {
  3294  			return nil, errors.New("unable to cast db Message to " +
  3295  				"ChannelReestablish")
  3296  		}
  3297  		c.LastChanSyncMsg = chanSync
  3298  	}
  3299  
  3300  	return c, nil
  3301  }
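
// copyCloseSummary is an illustrative in-package sketch (not part of the
// original source): round-tripping a summary through the serializer above is
// a simple deep copy, and exercises the optional-field layout that the
// encoder and decoder must agree on.
func copyCloseSummary(cs *ChannelCloseSummary) (*ChannelCloseSummary, error) {
	var b bytes.Buffer
	if err := serializeChannelCloseSummary(&b, cs); err != nil {
		return nil, err
	}

	return deserializeCloseChannelSummary(&b)
}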
  3302  
  3303  func writeChanConfig(b io.Writer, c *ChannelConfig) error {
  3304  	return WriteElements(b,
  3305  		c.DustLimit, c.MaxPendingAmount, c.ChanReserve, c.MinHTLC,
  3306  		c.MaxAcceptedHtlcs, c.CsvDelay, c.MultiSigKey,
  3307  		c.RevocationBasePoint, c.PaymentBasePoint, c.DelayBasePoint,
  3308  		c.HtlcBasePoint,
  3309  	)
  3310  }
  3311  
  3312  // fundingTxPresent returns true if we expect the funding transaction to be
  3313  // found on disk or already populated within the passed open channel struct.
  3314  func fundingTxPresent(channel *OpenChannel) bool {
  3315  	chanType := channel.ChanType
  3316  
  3317  	return chanType.IsSingleFunder() && chanType.HasFundingTx() &&
  3318  		channel.IsInitiator &&
  3319  		!channel.hasChanStatus(ChanStatusRestored)
  3320  }
  3321  
  3322  func putChanInfo(chanBucket kvdb.RwBucket, channel *OpenChannel) error {
  3323  	var w bytes.Buffer
  3324  	if err := WriteElements(&w,
  3325  		channel.ChanType, channel.ChainHash, channel.FundingOutpoint,
  3326  		channel.ShortChannelID, channel.IsPending, channel.IsInitiator,
  3327  		channel.chanStatus, channel.FundingBroadcastHeight,
  3328  		channel.NumConfsRequired, channel.ChannelFlags,
  3329  		channel.IdentityPub, channel.Capacity, channel.TotalMAtomsSent,
  3330  		channel.TotalMAtomsReceived,
  3331  	); err != nil {
  3332  		return err
  3333  	}
  3334  
  3335  	// For single funder channels that we initiated and for which we have
  3336  	// the funding transaction, write the funding txn.
  3337  	if fundingTxPresent(channel) {
  3338  		if err := WriteElement(&w, channel.FundingTxn); err != nil {
  3339  			return err
  3340  		}
  3341  	}
  3342  
  3343  	if err := writeChanConfig(&w, &channel.LocalChanCfg); err != nil {
  3344  		return err
  3345  	}
  3346  	if err := writeChanConfig(&w, &channel.RemoteChanCfg); err != nil {
  3347  		return err
  3348  	}
  3349  
  3350  	// Write the RevocationKeyLocator as the first entry in a tlv stream.
  3351  	keyLocRecord := MakeKeyLocRecord(
  3352  		keyLocType, &channel.RevocationKeyLocator,
  3353  	)
  3354  
  3355  	tlvStream, err := tlv.NewStream(keyLocRecord)
  3356  	if err != nil {
  3357  		return err
  3358  	}
  3359  
  3360  	if err := tlvStream.Encode(&w); err != nil {
  3361  		return err
  3362  	}
  3363  
  3364  	if err := chanBucket.Put(chanInfoKey, w.Bytes()); err != nil {
  3365  		return err
  3366  	}
  3367  
  3368  	// Finally, add optional shutdown scripts for the local and remote peer if
  3369  	// they are present.
  3370  	if err := putOptionalUpfrontShutdownScript(
  3371  		chanBucket, localUpfrontShutdownKey, channel.LocalShutdownScript,
  3372  	); err != nil {
  3373  		return err
  3374  	}
  3375  
  3376  	return putOptionalUpfrontShutdownScript(
  3377  		chanBucket, remoteUpfrontShutdownKey, channel.RemoteShutdownScript,
  3378  	)
  3379  }
  3380  
  3381  // putOptionalUpfrontShutdownScript adds a shutdown script under the key
  3382  // provided if it has a non-zero length.
  3383  func putOptionalUpfrontShutdownScript(chanBucket kvdb.RwBucket, key []byte,
  3384  	script []byte) error {
  3385  	// If the script is empty, we do not need to add anything.
  3386  	if len(script) == 0 {
  3387  		return nil
  3388  	}
  3389  
  3390  	var w bytes.Buffer
  3391  	if err := WriteElement(&w, script); err != nil {
  3392  		return err
  3393  	}
  3394  
  3395  	return chanBucket.Put(key, w.Bytes())
  3396  }
  3397  
  3398  // getOptionalUpfrontShutdownScript reads the shutdown script stored under the
  3399  // key provided if it is present. Upfront shutdown scripts are optional, so the
  3400  // function returns with no error if the key is not present.
  3401  func getOptionalUpfrontShutdownScript(chanBucket kvdb.RBucket, key []byte,
  3402  	script *lnwire.DeliveryAddress) error {
  3403  
  3404  	// Return early if the key is not present; a shutdown script was not set.
  3405  	bs := chanBucket.Get(key)
  3406  	if bs == nil {
  3407  		return nil
  3408  	}
  3409  
  3410  	var tempScript []byte
  3411  	r := bytes.NewReader(bs)
  3412  	if err := ReadElement(r, &tempScript); err != nil {
  3413  		return err
  3414  	}
  3415  	*script = tempScript
  3416  
  3417  	return nil
  3418  }
  3419  
  3420  func serializeChanCommit(w io.Writer, c *ChannelCommitment) error {
  3421  	if err := WriteElements(w,
  3422  		c.CommitHeight, c.LocalLogIndex, c.LocalHtlcIndex,
  3423  		c.RemoteLogIndex, c.RemoteHtlcIndex, c.LocalBalance,
  3424  		c.RemoteBalance, c.CommitFee, c.FeePerKB, c.CommitTx,
  3425  		c.CommitSig,
  3426  	); err != nil {
  3427  		return err
  3428  	}
  3429  
  3430  	return SerializeHtlcs(w, c.Htlcs...)
  3431  }
  3432  
  3433  func putChanCommitment(chanBucket kvdb.RwBucket, c *ChannelCommitment,
  3434  	local bool) error {
  3435  
  3436  	var commitKey []byte
  3437  	if local {
  3438  		commitKey = append(chanCommitmentKey, byte(0x00))
  3439  	} else {
  3440  		commitKey = append(chanCommitmentKey, byte(0x01))
  3441  	}
  3442  
  3443  	var b bytes.Buffer
  3444  	if err := serializeChanCommit(&b, c); err != nil {
  3445  		return err
  3446  	}
  3447  
  3448  	return chanBucket.Put(commitKey, b.Bytes())
  3449  }
  3450  
  3451  func putChanCommitments(chanBucket kvdb.RwBucket, channel *OpenChannel) error {
  3452  	// If this is a restored channel, then we don't have any commitments to
  3453  	// write.
  3454  	if channel.hasChanStatus(ChanStatusRestored) {
  3455  		return nil
  3456  	}
  3457  
  3458  	err := putChanCommitment(
  3459  		chanBucket, &channel.LocalCommitment, true,
  3460  	)
  3461  	if err != nil {
  3462  		return err
  3463  	}
  3464  
  3465  	return putChanCommitment(
  3466  		chanBucket, &channel.RemoteCommitment, false,
  3467  	)
  3468  }
  3469  
  3470  func putChanRevocationState(chanBucket kvdb.RwBucket, channel *OpenChannel) error {
  3471  
  3472  	var b bytes.Buffer
  3473  	err := WriteElements(
  3474  		&b, channel.RemoteCurrentRevocation, channel.RevocationProducer,
  3475  		channel.RevocationStore,
  3476  	)
  3477  	if err != nil {
  3478  		return err
  3479  	}
  3480  
  3481  	// TODO(roasbeef): don't keep producer on disk
  3482  
  3483  	// If the next revocation is present, which is only the case after the
  3484  	// FundingLocked message has been sent, then we'll write it to disk.
  3485  	if channel.RemoteNextRevocation != nil {
  3486  		err = WriteElements(&b, channel.RemoteNextRevocation)
  3487  		if err != nil {
  3488  			return err
  3489  		}
  3490  	}
  3491  
  3492  	return chanBucket.Put(revocationStateKey, b.Bytes())
  3493  }
  3494  
  3495  func readChanConfig(b io.Reader, c *ChannelConfig) error {
  3496  	return ReadElements(b,
  3497  		&c.DustLimit, &c.MaxPendingAmount, &c.ChanReserve,
  3498  		&c.MinHTLC, &c.MaxAcceptedHtlcs, &c.CsvDelay,
  3499  		&c.MultiSigKey, &c.RevocationBasePoint,
  3500  		&c.PaymentBasePoint, &c.DelayBasePoint,
  3501  		&c.HtlcBasePoint,
  3502  	)
  3503  }
  3504  
  3505  func fetchChanInfo(chanBucket kvdb.RBucket, channel *OpenChannel) error {
  3506  	infoBytes := chanBucket.Get(chanInfoKey)
  3507  	if infoBytes == nil {
  3508  		return ErrNoChanInfoFound
  3509  	}
  3510  	r := bytes.NewReader(infoBytes)
  3511  
  3512  	if err := ReadElements(r,
  3513  		&channel.ChanType, &channel.ChainHash, &channel.FundingOutpoint,
  3514  		&channel.ShortChannelID, &channel.IsPending, &channel.IsInitiator,
  3515  		&channel.chanStatus, &channel.FundingBroadcastHeight,
  3516  		&channel.NumConfsRequired, &channel.ChannelFlags,
  3517  		&channel.IdentityPub, &channel.Capacity, &channel.TotalMAtomsSent,
  3518  		&channel.TotalMAtomsReceived,
  3519  	); err != nil {
  3520  		return err
  3521  	}
  3522  
  3523  	// For single funder channels that we initiated and for which we have
  3524  	// the funding transaction, read the funding txn.
  3525  	if fundingTxPresent(channel) {
  3526  		if err := ReadElement(r, &channel.FundingTxn); err != nil {
  3527  			return err
  3528  		}
  3529  	}
  3530  
  3531  	if err := readChanConfig(r, &channel.LocalChanCfg); err != nil {
  3532  		return err
  3533  	}
  3534  	if err := readChanConfig(r, &channel.RemoteChanCfg); err != nil {
  3535  		return err
  3536  	}
  3537  
  3538  	// Retrieve the boolean stored under lastWasRevokeKey.
  3539  	lastWasRevokeBytes := chanBucket.Get(lastWasRevokeKey)
  3540  	if lastWasRevokeBytes == nil {
  3541  		// If nothing has been stored under this key, we store false in the
  3542  		// OpenChannel struct.
  3543  		channel.LastWasRevoke = false
  3544  	} else {
  3545  		// Otherwise, read the value into the LastWasRevoke field.
  3546  		revokeReader := bytes.NewReader(lastWasRevokeBytes)
  3547  		err := ReadElements(revokeReader, &channel.LastWasRevoke)
  3548  		if err != nil {
  3549  			return err
  3550  		}
  3551  	}
  3552  
  3553  	keyLocRecord := MakeKeyLocRecord(keyLocType, &channel.RevocationKeyLocator)
  3554  	tlvStream, err := tlv.NewStream(keyLocRecord)
  3555  	if err != nil {
  3556  		return err
  3557  	}
  3558  
  3559  	if err := tlvStream.Decode(r); err != nil {
  3560  		return err
  3561  	}
  3562  
  3563  	channel.Packager = NewChannelPackager(channel.ShortChannelID)
  3564  
  3565  	// Finally, read the optional shutdown scripts.
  3566  	if err := getOptionalUpfrontShutdownScript(
  3567  		chanBucket, localUpfrontShutdownKey, &channel.LocalShutdownScript,
  3568  	); err != nil {
  3569  		return err
  3570  	}
  3571  
  3572  	return getOptionalUpfrontShutdownScript(
  3573  		chanBucket, remoteUpfrontShutdownKey, &channel.RemoteShutdownScript,
  3574  	)
  3575  }
  3576  
  3577  func deserializeChanCommit(r io.Reader) (ChannelCommitment, error) {
  3578  	var c ChannelCommitment
  3579  
  3580  	err := ReadElements(r,
  3581  		&c.CommitHeight, &c.LocalLogIndex, &c.LocalHtlcIndex, &c.RemoteLogIndex,
  3582  		&c.RemoteHtlcIndex, &c.LocalBalance, &c.RemoteBalance,
  3583  		&c.CommitFee, &c.FeePerKB, &c.CommitTx, &c.CommitSig,
  3584  	)
  3585  	if err != nil {
  3586  		return c, err
  3587  	}
  3588  
  3589  	c.Htlcs, err = DeserializeHtlcs(r)
  3590  	if err != nil {
  3591  		return c, err
  3592  	}
  3593  
  3594  	return c, nil
  3595  }
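
// copyChanCommit is an illustrative in-package sketch (not part of the
// original source): serializeChanCommit and deserializeChanCommit are
// inverses, so encoding into a buffer and decoding again yields an
// independent copy of the commitment, HTLCs included.
func copyChanCommit(c *ChannelCommitment) (ChannelCommitment, error) {
	var b bytes.Buffer
	if err := serializeChanCommit(&b, c); err != nil {
		return ChannelCommitment{}, err
	}

	return deserializeChanCommit(&b)
}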
  3596  
  3597  func fetchChanCommitment(chanBucket kvdb.RBucket, local bool) (ChannelCommitment, error) {
  3598  	var commitKey []byte
  3599  	if local {
  3600  		commitKey = append(chanCommitmentKey, 0x00)
  3601  	} else {
  3602  		commitKey = append(chanCommitmentKey, 0x01)
  3603  	}
  3604  
  3605  	commitBytes := chanBucket.Get(commitKey)
  3606  	if commitBytes == nil {
  3607  		return ChannelCommitment{}, ErrNoCommitmentsFound
  3608  	}
  3609  
  3610  	r := bytes.NewReader(commitBytes)
  3611  	return deserializeChanCommit(r)
  3612  }
  3613  
  3614  func fetchChanCommitments(chanBucket kvdb.RBucket, channel *OpenChannel) error {
  3615  	var err error
  3616  
  3617  	// If this is a restored channel, then we don't have any commitments to
  3618  	// read.
  3619  	if channel.hasChanStatus(ChanStatusRestored) {
  3620  		return nil
  3621  	}
  3622  
  3623  	channel.LocalCommitment, err = fetchChanCommitment(chanBucket, true)
  3624  	if err != nil {
  3625  		return err
  3626  	}
  3627  	channel.RemoteCommitment, err = fetchChanCommitment(chanBucket, false)
  3628  	if err != nil {
  3629  		return err
  3630  	}
  3631  
  3632  	return nil
  3633  }
  3634  
  3635  func fetchChanRevocationState(chanBucket kvdb.RBucket, channel *OpenChannel) error {
  3636  	revBytes := chanBucket.Get(revocationStateKey)
  3637  	if revBytes == nil {
  3638  		return ErrNoRevocationsFound
  3639  	}
  3640  	r := bytes.NewReader(revBytes)
  3641  
  3642  	err := ReadElements(
  3643  		r, &channel.RemoteCurrentRevocation, &channel.RevocationProducer,
  3644  		&channel.RevocationStore,
  3645  	)
  3646  	if err != nil {
  3647  		return err
  3648  	}
  3649  
  3650  	// If there aren't any bytes left in the buffer, then we don't yet have
  3651  	// the next remote revocation, so we can exit early here.
  3652  	if r.Len() == 0 {
  3653  		return nil
  3654  	}
  3655  
  3656  	// Otherwise we'll read the next revocation for the remote party which
  3657  	// is always the last item within the buffer.
  3658  	return ReadElements(r, &channel.RemoteNextRevocation)
  3659  }
  3660  
  3661  func deleteOpenChannel(chanBucket kvdb.RwBucket) error {
  3662  
  3663  	if err := chanBucket.Delete(chanInfoKey); err != nil {
  3664  		return err
  3665  	}
  3666  
  3667  	err := chanBucket.Delete(append(chanCommitmentKey, byte(0x00)))
  3668  	if err != nil {
  3669  		return err
  3670  	}
  3671  	err = chanBucket.Delete(append(chanCommitmentKey, byte(0x01)))
  3672  	if err != nil {
  3673  		return err
  3674  	}
  3675  
  3676  	if err := chanBucket.Delete(revocationStateKey); err != nil {
  3677  		return err
  3678  	}
  3679  
  3680  	if diff := chanBucket.Get(commitDiffKey); diff != nil {
  3681  		return chanBucket.Delete(commitDiffKey)
  3682  	}
  3683  
  3684  	return nil
  3685  
  3686  }
  3687  
  3688  // makeLogKey converts a uint64 into an 8 byte array.
  3689  func makeLogKey(updateNum uint64) [8]byte {
  3690  	var key [8]byte
  3691  	byteOrder.PutUint64(key[:], updateNum)
  3692  	return key
  3693  }
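
// parseLogKey is an illustrative inverse of makeLogKey (not part of the
// original source). Update numbers are serialized with the package's
// byteOrder (big-endian), so lexicographic ordering of bucket keys matches
// numeric ordering of commitment heights, e.g. makeLogKey(256) sorts after
// makeLogKey(1).
func parseLogKey(key [8]byte) uint64 {
	return byteOrder.Uint64(key[:])
}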
  3694  
  3695  func appendChannelLogEntry(log kvdb.RwBucket,
  3696  	commit *ChannelCommitment) error {
  3697  
  3698  	var b bytes.Buffer
  3699  	if err := serializeChanCommit(&b, commit); err != nil {
  3700  		return err
  3701  	}
  3702  
  3703  	logEntrykey := makeLogKey(commit.CommitHeight)
  3704  	return log.Put(logEntrykey[:], b.Bytes())
  3705  }
  3706  
  3707  func fetchChannelLogEntry(log kvdb.RBucket,
  3708  	updateNum uint64) (ChannelCommitment, error) {
  3709  
  3710  	logEntrykey := makeLogKey(updateNum)
  3711  	commitBytes := log.Get(logEntrykey[:])
  3712  	if commitBytes == nil {
  3713  		return ChannelCommitment{}, ErrLogEntryNotFound
  3714  	}
  3715  
  3716  	commitReader := bytes.NewReader(commitBytes)
  3717  	return deserializeChanCommit(commitReader)
  3718  }
  3719  
  3720  func fetchThawHeight(chanBucket kvdb.RBucket) (uint32, error) {
  3721  	var height uint32
  3722  
  3723  	heightBytes := chanBucket.Get(frozenChanKey)
  3724  	heightReader := bytes.NewReader(heightBytes)
  3725  
  3726  	if err := ReadElements(heightReader, &height); err != nil {
  3727  		return 0, err
  3728  	}
  3729  
  3730  	return height, nil
  3731  }
  3732  
  3733  func storeThawHeight(chanBucket kvdb.RwBucket, height uint32) error {
  3734  	var heightBuf bytes.Buffer
  3735  	if err := WriteElements(&heightBuf, height); err != nil {
  3736  		return err
  3737  	}
  3738  
  3739  	return chanBucket.Put(frozenChanKey, heightBuf.Bytes())
  3740  }
  3741  
  3742  func deleteThawHeight(chanBucket kvdb.RwBucket) error {
  3743  	return chanBucket.Delete(frozenChanKey)
  3744  }
  3745  
  3746  // EKeyLocator is an encoder for keychain.KeyLocator.
  3747  func EKeyLocator(w io.Writer, val interface{}, buf *[8]byte) error {
  3748  	if v, ok := val.(*keychain.KeyLocator); ok {
  3749  		err := tlv.EUint32T(w, uint32(v.Family), buf)
  3750  		if err != nil {
  3751  			return err
  3752  		}
  3753  
  3754  		return tlv.EUint32T(w, v.Index, buf)
  3755  	}
  3756  	return tlv.NewTypeForEncodingErr(val, "keychain.KeyLocator")
  3757  }
  3758  
  3759  // DKeyLocator is a decoder for keychain.KeyLocator.
  3760  func DKeyLocator(r io.Reader, val interface{}, buf *[8]byte, l uint64) error {
  3761  	if v, ok := val.(*keychain.KeyLocator); ok {
  3762  		var family uint32
  3763  		err := tlv.DUint32(r, &family, buf, 4)
  3764  		if err != nil {
  3765  			return err
  3766  		}
  3767  		v.Family = keychain.KeyFamily(family)
  3768  
  3769  		return tlv.DUint32(r, &v.Index, buf, 4)
  3770  	}
  3771  	return tlv.NewTypeForDecodingErr(val, "keychain.KeyLocator", l, 8)
  3772  }
  3773  
  3774  // MakeKeyLocRecord creates a Record out of a KeyLocator using the passed
  3775  // Type and the EKeyLocator and DKeyLocator functions. The size will always be
  3776  // 8 as KeyFamily is uint32 and the Index is uint32.
  3777  func MakeKeyLocRecord(typ tlv.Type, keyLoc *keychain.KeyLocator) tlv.Record {
  3778  	return tlv.MakeStaticRecord(typ, keyLoc, 8, EKeyLocator, DKeyLocator)
  3779  }
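
// copyKeyLocator is an illustrative sketch (not part of the original
// source): it round-trips a keychain.KeyLocator through a tlv stream built
// with MakeKeyLocRecord, showing the fixed 8-byte wire layout of the two
// uint32 fields.
func copyKeyLocator(loc keychain.KeyLocator) (keychain.KeyLocator, error) {
	var (
		decoded keychain.KeyLocator
		b       bytes.Buffer
	)

	encStream, err := tlv.NewStream(MakeKeyLocRecord(keyLocType, &loc))
	if err != nil {
		return decoded, err
	}
	if err := encStream.Encode(&b); err != nil {
		return decoded, err
	}

	decStream, err := tlv.NewStream(MakeKeyLocRecord(keyLocType, &decoded))
	if err != nil {
		return decoded, err
	}
	if err := decStream.Decode(&b); err != nil {
		return decoded, err
	}

	return decoded, nil
}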