github.com/decred/dcrlnd@v0.7.6/contractcourt/chain_watcher.go

package contractcourt

import (
	"bytes"
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	"github.com/davecgh/go-spew/spew"
	"github.com/decred/dcrd/chaincfg/chainhash"
	"github.com/decred/dcrd/chaincfg/v3"
	"github.com/decred/dcrd/dcrec/secp256k1/v4"
	"github.com/decred/dcrd/dcrutil/v4"
	"github.com/decred/dcrd/txscript/v4/stdaddr"
	"github.com/decred/dcrd/txscript/v4/stdscript"
	"github.com/decred/dcrd/wire"
	"github.com/decred/dcrlnd/chainntnfs"
	"github.com/decred/dcrlnd/channeldb"
	"github.com/decred/dcrlnd/input"
	"github.com/decred/dcrlnd/lnwallet"
)

const (
	// minCommitPointPollTimeout is the minimum time we'll wait before
	// polling the database for a channel's commitpoint.
	minCommitPointPollTimeout = 1 * time.Second

	// maxCommitPointPollTimeout is the maximum time we'll wait before
	// polling the database for a channel's commitpoint.
	maxCommitPointPollTimeout = 10 * time.Minute
)

// LocalUnilateralCloseInfo encapsulates all the information we need to act on
// a local force close that gets confirmed.
type LocalUnilateralCloseInfo struct {
	*chainntnfs.SpendDetail
	*lnwallet.LocalForceCloseSummary
	*channeldb.ChannelCloseSummary

	// CommitSet is the set of known valid commitments at the time the
	// remote party's commitment hit the chain.
	CommitSet CommitSet
}

// CooperativeCloseInfo encapsulates all the information we need to act on a
// cooperative close that gets confirmed.
type CooperativeCloseInfo struct {
	*channeldb.ChannelCloseSummary
}

// RemoteUnilateralCloseInfo wraps the normal UnilateralCloseSummary to couple
// the CommitSet at the time of channel closure.
type RemoteUnilateralCloseInfo struct {
	*lnwallet.UnilateralCloseSummary

	// CommitSet is the set of known valid commitments at the time the
	// remote party's commitment hit the chain.
	CommitSet CommitSet
}

// BreachResolution wraps the outpoint of the breached channel.
type BreachResolution struct {
	FundingOutPoint wire.OutPoint
}

// BreachCloseInfo wraps the BreachResolution with a CommitSet for the latest,
// non-breached state, along with the AnchorResolution for the breached state.
type BreachCloseInfo struct {
	*BreachResolution
	*lnwallet.AnchorResolution

	// CommitHash is the hash of the commitment transaction.
	CommitHash chainhash.Hash

	// CommitSet is the set of known valid commitments at the time the
	// breach occurred on-chain.
	CommitSet CommitSet

	// CloseSummary gives the recipient of the BreachCloseInfo information
	// to mark the channel closed in the database.
	CloseSummary channeldb.ChannelCloseSummary
}

// CommitSet is a collection of the set of known valid commitments at a given
// instant. If ConfCommitKey is set, then the commitment identified by the
// HtlcSetKey has hit the chain. This struct will be used to examine all live
// HTLCs to determine if any additional actions need to be taken based on the
// remote party's commitments.
type CommitSet struct {
	// ConfCommitKey, if non-nil, identifies the commitment that was
	// confirmed in the chain.
	ConfCommitKey *HtlcSetKey

	// HtlcSets stores the set of all known active HTLCs for each active
	// commitment at the time of channel closure.
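	// In practice these are keyed by LocalHtlcSet, RemoteHtlcSet and,
	// when the remote party has a dangling unrevoked commitment,
	// RemotePendingHtlcSet (see newChainSet below).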
	HtlcSets map[HtlcSetKey][]channeldb.HTLC
}

// IsEmpty returns true if there are no HTLCs at all within all commitments
// that are a part of this commitment diff.
func (c *CommitSet) IsEmpty() bool {
	if c == nil {
		return true
	}

	for _, htlcs := range c.HtlcSets {
		if len(htlcs) != 0 {
			return false
		}
	}

	return true
}

// toActiveHTLCSets returns the set of all active HTLCs across all commitment
// transactions.
func (c *CommitSet) toActiveHTLCSets() map[HtlcSetKey]htlcSet {
	htlcSets := make(map[HtlcSetKey]htlcSet)

	for htlcSetKey, htlcs := range c.HtlcSets {
		htlcSets[htlcSetKey] = newHtlcSet(htlcs)
	}

	return htlcSets
}

// ChainEventSubscription is a struct that houses a subscription to be notified
// for any on-chain events related to a channel. There are three types of
// possible on-chain events: a cooperative channel closure, a unilateral
// channel closure, and a channel breach. The fourth type, a force close, is
// locally initiated, so we don't provide any event stream for that event.
type ChainEventSubscription struct {
	// ChanPoint is the channel that chain events will be dispatched for.
	ChanPoint wire.OutPoint

	// RemoteUnilateralClosure is a channel that will be sent upon in the
	// event that the remote party's commitment transaction is confirmed.
	RemoteUnilateralClosure chan *RemoteUnilateralCloseInfo

	// LocalUnilateralClosure is a channel that will be sent upon in the
	// event that our commitment transaction is confirmed.
	LocalUnilateralClosure chan *LocalUnilateralCloseInfo

	// CooperativeClosure is a signal that will be sent upon once a
	// cooperative channel closure has been detected as confirmed.
	CooperativeClosure chan *CooperativeCloseInfo

	// ContractBreach is a channel that will be sent upon if we detect a
	// contract breach. The struct sent across the channel contains all the
	// material required to bring the cheating channel peer to justice.
	ContractBreach chan *BreachCloseInfo

	// Cancel cancels the subscription to the event stream for a particular
	// channel. This method should be called once the caller no longer
	// needs to be notified of any on-chain events for a particular
	// channel.
	Cancel func()
}

// chainWatcherConfig encapsulates all the necessary functions and interfaces
// needed to watch and act on on-chain events for a particular channel.
type chainWatcherConfig struct {
	// netParams are the network parameters for the current chain.
	netParams *chaincfg.Params

	// chanState is a snapshot of the persistent state of the channel that
	// we're watching. In the event of an on-chain event, we'll query the
	// database to ensure that we act using the most up to date state.
	chanState *channeldb.OpenChannel

	// notifier is a reference to the channel notifier that we'll use to be
	// notified of output spends and when transactions are confirmed.
	notifier chainntnfs.ChainNotifier

	// signer is the main signer instance that will be responsible for
	// signing any HTLC and commitment transactions generated by the state
	// machine.
	signer input.Signer

	// contractBreach is a method that will be called by the watcher if it
	// detects that a contract breach transaction has been confirmed. It
	// will only return a non-nil error when the breachArbiter has
	// preserved the necessary breach info for this channel point.
	contractBreach func(*lnwallet.BreachRetribution) error

	// isOurAddr is a function that returns true if the passed address is
	// known to us.
	isOurAddr func(stdaddr.Address) bool

	// extractStateNumHint extracts the encoded state hint using the passed
	// obfuscator. This is used by the chain watcher to identify which
	// state was broadcast and confirmed on-chain.
	extractStateNumHint func(*wire.MsgTx, [lnwallet.StateHintSize]byte) uint64
}

// chainWatcher is a system that's assigned to every active channel. The duty
// of this system is to watch the chain for spends of the channel's chan point.
// If a spend is detected, then the chain watcher will notify all subscribers
// that the channel has been closed, and also give them the materials necessary
// to eventually sweep the funds of the channel on chain.
type chainWatcher struct {
	started int32 // To be used atomically.
	stopped int32 // To be used atomically.

	quit chan struct{}
	wg   sync.WaitGroup

	cfg chainWatcherConfig

	// stateHintObfuscator is a 48-bit state hint that's used to obfuscate
	// the current state number on the commitment transactions.
	stateHintObfuscator [lnwallet.StateHintSize]byte

	// All the fields below are protected by this mutex.
	sync.Mutex

	// clientID is an ephemeral counter used to keep track of each
	// individual client subscription.
	clientID uint64

	// clientSubscriptions is a map that keeps track of all the active
	// client subscriptions for events related to this channel.
	clientSubscriptions map[uint64]*ChainEventSubscription
}

// newChainWatcher returns a new instance of a chainWatcher for a channel given
// the chan point to watch, and also a notifier instance that will allow us to
// detect on-chain events.
func newChainWatcher(cfg chainWatcherConfig) (*chainWatcher, error) {
	// In order to be able to detect the nature of a potential channel
	// closure we'll need to reconstruct the state hint bytes used to
	// obfuscate the commitment state number encoded in the lock time and
	// sequence fields.
	var stateHint [lnwallet.StateHintSize]byte
	chanState := cfg.chanState
	if chanState.IsInitiator {
		stateHint = lnwallet.DeriveStateHintObfuscator(
			chanState.LocalChanCfg.PaymentBasePoint.PubKey,
			chanState.RemoteChanCfg.PaymentBasePoint.PubKey,
		)
	} else {
		stateHint = lnwallet.DeriveStateHintObfuscator(
			chanState.RemoteChanCfg.PaymentBasePoint.PubKey,
			chanState.LocalChanCfg.PaymentBasePoint.PubKey,
		)
	}

	return &chainWatcher{
		cfg:                 cfg,
		stateHintObfuscator: stateHint,
		quit:                make(chan struct{}),
		clientSubscriptions: make(map[uint64]*ChainEventSubscription),
	}, nil
}

// Start starts all goroutines that the chainWatcher needs to perform its
// duties.
func (c *chainWatcher) Start() error {
	if !atomic.CompareAndSwapInt32(&c.started, 0, 1) {
		return nil
	}

	chanState := c.cfg.chanState
	log.Debugf("Starting chain watcher for ChannelPoint(%v)",
		chanState.FundingOutpoint)

	// First, we'll register for a notification to be dispatched if the
	// funding output is spent.
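	// The notification is registered against the funding outpoint and the
	// script-hash pkScript of the 2-of-2 multisig that backs it, both of
	// which are derived below from the channel state.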
	fundingOut := &chanState.FundingOutpoint

	// As a height hint, we'll try to use the opening height, but if the
	// channel isn't yet open, then we'll use the height it was broadcast
	// at.
	heightHint := c.cfg.chanState.ShortChanID().BlockHeight
	if heightHint == 0 {
		heightHint = chanState.FundingBroadcastHeight
	}

	localKey := chanState.LocalChanCfg.MultiSigKey.PubKey.SerializeCompressed()
	remoteKey := chanState.RemoteChanCfg.MultiSigKey.PubKey.SerializeCompressed()
	multiSigScript, err := input.GenMultiSigScript(
		localKey, remoteKey,
	)
	if err != nil {
		return err
	}
	pkScript, err := input.ScriptHashPkScript(multiSigScript)
	if err != nil {
		return err
	}

	spendNtfn, err := c.cfg.notifier.RegisterSpendNtfn(
		fundingOut, pkScript, heightHint,
	)
	if err != nil {
		return err
	}

	// With the spend notification obtained, we'll now dispatch the
	// closeObserver which will properly react to any changes.
	c.wg.Add(1)
	go c.closeObserver(spendNtfn)

	return nil
}

// Stop signals the close observer to gracefully exit.
func (c *chainWatcher) Stop() error {
	if !atomic.CompareAndSwapInt32(&c.stopped, 0, 1) {
		return nil
	}

	close(c.quit)

	c.wg.Wait()

	return nil
}

// SubscribeChannelEvents returns an active subscription to the set of channel
// events for the channel watched by this chain watcher. Once clients no longer
// require the subscription, they should call the Cancel() method to allow the
// watcher to regain those committed resources.
func (c *chainWatcher) SubscribeChannelEvents() *ChainEventSubscription {
	c.Lock()
	clientID := c.clientID
	c.clientID++
	c.Unlock()

	log.Debugf("New ChainEventSubscription(id=%v) for ChannelPoint(%v)",
		clientID, c.cfg.chanState.FundingOutpoint)

	sub := &ChainEventSubscription{
		ChanPoint:               c.cfg.chanState.FundingOutpoint,
		RemoteUnilateralClosure: make(chan *RemoteUnilateralCloseInfo, 1),
		LocalUnilateralClosure:  make(chan *LocalUnilateralCloseInfo, 1),
		CooperativeClosure:      make(chan *CooperativeCloseInfo, 1),
		ContractBreach:          make(chan *BreachCloseInfo, 1),
		Cancel: func() {
			c.Lock()
			delete(c.clientSubscriptions, clientID)
			c.Unlock()
		},
	}

	c.Lock()
	c.clientSubscriptions[clientID] = sub
	c.Unlock()

	return sub
}

// handleUnknownLocalState checks whether the passed spend _could_ be a local
// state that for some reason is unknown to us. This could be a state published
// by us before we lost state, which we will try to sweep. Or it could be one
// of our revoked states that somehow made it to the chain. If that's the case
// we cannot really hope that we'll be able to get our money back, but we'll
// try to sweep it anyway. If this is not an unknown local state, false is
// returned.
func (c *chainWatcher) handleUnknownLocalState(
	commitSpend *chainntnfs.SpendDetail, broadcastStateNum uint64,
	chainSet *chainSet) (bool, error) {

	// If the spend was a local commitment, at this point it must either be
	// a past state (we breached!) or a future state (we lost state!). In
	// either case, the only thing we can do is to attempt to sweep what is
	// there.

	// First, we'll re-derive our commitment point for this state since
	// this is what we use to randomize each of the keys for this state.
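	// The per-state secret comes from our revocation producer at the
	// broadcast state index; the commitment point is that secret's
	// corresponding EC point (via input.ComputeCommitmentPoint).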
	commitSecret, err := c.cfg.chanState.RevocationProducer.AtIndex(
		broadcastStateNum,
	)
	if err != nil {
		return false, err
	}
	commitPoint := input.ComputeCommitmentPoint(commitSecret[:])

	// Now that we have the commit point, we'll derive the tweaked local
	// and remote keys for this state. We use our point as only we can
	// revoke our own commitment.
	commitKeyRing := lnwallet.DeriveCommitmentKeys(
		commitPoint, true, c.cfg.chanState.ChanType,
		&c.cfg.chanState.LocalChanCfg, &c.cfg.chanState.RemoteChanCfg,
	)

	// With the keys derived, we'll construct the remote script that'll be
	// present if they have a non-dust balance on the commitment.
	var leaseExpiry uint32
	if c.cfg.chanState.ChanType.HasLeaseExpiration() {
		leaseExpiry = c.cfg.chanState.ThawHeight
	}
	remoteScript, _, err := lnwallet.CommitScriptToRemote(
		c.cfg.chanState.ChanType, c.cfg.chanState.IsInitiator,
		commitKeyRing.ToRemoteKey, leaseExpiry,
	)
	if err != nil {
		return false, err
	}

	// Next, we'll derive our script that includes the revocation base for
	// the remote party allowing them to claim this output before the CSV
	// delay if we breach.
	localScript, err := lnwallet.CommitScriptToSelf(
		c.cfg.chanState.ChanType, c.cfg.chanState.IsInitiator,
		commitKeyRing.ToLocalKey, commitKeyRing.RevocationKey,
		uint32(c.cfg.chanState.LocalChanCfg.CsvDelay), leaseExpiry,
	)
	if err != nil {
		return false, err
	}

	// With all our scripts assembled, we'll examine the outputs of the
	// commitment transaction to determine if this is a local force close
	// or not.
	ourCommit := false
	for _, output := range commitSpend.SpendingTx.TxOut {
		pkScript := output.PkScript

		switch {
		case bytes.Equal(localScript.PkScript, pkScript):
			ourCommit = true

		case bytes.Equal(remoteScript.PkScript, pkScript):
			ourCommit = true
		}
	}

	// If neither script is present, this cannot be our commit.
	if !ourCommit {
		return false, nil
	}

	log.Warnf("Detected local unilateral close of unknown state %v "+
		"(our state=%v)", broadcastStateNum,
		chainSet.localCommit.CommitHeight)

	// If this is our commitment transaction, then we try to act even
	// though we won't be able to sweep HTLCs.
	chainSet.commitSet.ConfCommitKey = &LocalHtlcSet
	if err := c.dispatchLocalForceClose(
		commitSpend, broadcastStateNum, chainSet.commitSet,
	); err != nil {
		return false, fmt.Errorf("unable to handle local "+
			"close for chan_point=%v: %v",
			c.cfg.chanState.FundingOutpoint, err)
	}

	return true, nil
}

// chainSet includes all the information we need to dispatch a channel close
// event to any subscribers.
type chainSet struct {
	// remoteStateNum is the commitment number of the lowest valid
	// commitment the remote party holds from our PoV. This value is used
	// to determine if the remote party is playing a state that's behind,
	// in line, or ahead of the latest state we know for it.
	remoteStateNum uint64

	// commitSet includes information pertaining to the set of active HTLCs
	// on each commitment.
	commitSet CommitSet

	// remoteCommit is the current commitment of the remote party.
	remoteCommit channeldb.ChannelCommitment

	// localCommit is our current commitment.
	localCommit channeldb.ChannelCommitment

	// remotePendingCommit points to the dangling commitment of the remote
	// party, if it exists. If there's no dangling commitment, then this
	// pointer will be nil.
	remotePendingCommit *channeldb.ChannelCommitment
}

// newChainSet creates a new chainSet given the current up to date channel
// state.
func newChainSet(chanState *channeldb.OpenChannel) (*chainSet, error) {
	// First, we'll grab the current unrevoked commitments for ourselves
	// and the remote party.
	localCommit, remoteCommit, err := chanState.LatestCommitments()
	if err != nil {
		return nil, fmt.Errorf("unable to fetch channel state for "+
			"chan_point=%v", chanState.FundingOutpoint)
	}

	log.Debugf("ChannelPoint(%v): local_commit_type=%v, local_commit=%v",
		chanState.FundingOutpoint, chanState.ChanType,
		spew.Sdump(localCommit))
	log.Debugf("ChannelPoint(%v): remote_commit_type=%v, remote_commit=%v",
		chanState.FundingOutpoint, chanState.ChanType,
		spew.Sdump(remoteCommit))

	// Fetch the current known commit height for the remote party, and
	// their pending commitment chain tip if it exists.
	remoteStateNum := remoteCommit.CommitHeight
	remoteChainTip, err := chanState.RemoteCommitChainTip()
	if err != nil && err != channeldb.ErrNoPendingCommit {
		return nil, fmt.Errorf("unable to obtain chain tip for "+
			"ChannelPoint(%v): %v",
			chanState.FundingOutpoint, err)
	}

	// Now that we have all the possible valid commitments, we'll make the
	// CommitSet the ChannelArbitrator will need in order to carry out its
	// duty.
	commitSet := CommitSet{
		HtlcSets: map[HtlcSetKey][]channeldb.HTLC{
			LocalHtlcSet:  localCommit.Htlcs,
			RemoteHtlcSet: remoteCommit.Htlcs,
		},
	}

	var remotePendingCommit *channeldb.ChannelCommitment
	if remoteChainTip != nil {
		remotePendingCommit = &remoteChainTip.Commitment
		log.Debugf("ChannelPoint(%v): remote_pending_commit_type=%v, "+
			"remote_pending_commit=%v", chanState.FundingOutpoint,
			chanState.ChanType,
			spew.Sdump(remoteChainTip.Commitment))

		htlcs := remoteChainTip.Commitment.Htlcs
		commitSet.HtlcSets[RemotePendingHtlcSet] = htlcs
	}

	// We'll now retrieve the latest state of the revocation store so we
	// can populate the revocation information within the channel state
	// object that we have.
	//
	// TODO(roasbeef): mutation is bad mkay
	_, err = chanState.RemoteRevocationStore()
	if err != nil {
		return nil, fmt.Errorf("unable to fetch revocation state for "+
			"chan_point=%v", chanState.FundingOutpoint)
	}

	return &chainSet{
		remoteStateNum:      remoteStateNum,
		commitSet:           commitSet,
		localCommit:         *localCommit,
		remoteCommit:        *remoteCommit,
		remotePendingCommit: remotePendingCommit,
	}, nil
}

// closeObserver is a dedicated goroutine that will watch for any closes of the
// channel that it's watching on chain. In the event of an on-chain event, the
// close observer will assemble the proper materials required to claim the
// funds of the channel on-chain (if required), then dispatch these as
// notifications to all subscribers.
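//
// A confirmed spend is classified in order: our own known commitment, a known
// remote commitment (current, pending or revoked), a cooperative close, an
// unknown local commitment, and finally an unknown remote commitment, which
// triggers DLP-based recovery.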
func (c *chainWatcher) closeObserver(spendNtfn *chainntnfs.SpendEvent) {
	defer c.wg.Done()

	log.Infof("Close observer for ChannelPoint(%v) active",
		c.cfg.chanState.FundingOutpoint)

	select {
	// We've detected a spend of the channel onchain! Depending on the type
	// of spend, we'll act accordingly, so we'll examine the spending
	// transaction to determine what we should do.
	//
	// TODO(Roasbeef): need to be able to ensure this only triggers
	// on confirmation, to ensure if multiple txns are broadcast, we
	// act on the one that's timestamped
	case commitSpend, ok := <-spendNtfn.Spend:
		// If the channel was closed, then this means that the notifier
		// exited, so we will as well.
		if !ok {
			return
		}

		// Otherwise, the remote party might have broadcast a prior
		// revoked state...!!!
		commitTxBroadcast := commitSpend.SpendingTx

		// First, we'll construct the chainSet which includes all the
		// data we need to dispatch an event to our subscribers about
		// this possible channel close event.
		chainSet, err := newChainSet(c.cfg.chanState)
		if err != nil {
			log.Errorf("unable to create commit set: %v", err)
			return
		}

		// Decode the state hint encoded within the commitment
		// transaction to determine if this is a revoked state or not.
		obfuscator := c.stateHintObfuscator
		broadcastStateNum := c.cfg.extractStateNumHint(
			commitTxBroadcast, obfuscator,
		)

		// We'll go on to check whether it could be our own commitment
		// that was published and is now confirmed.
		ok, err = c.handleKnownLocalState(
			commitSpend, broadcastStateNum, chainSet,
		)
		if err != nil {
			log.Errorf("Unable to handle known local state: %v",
				err)
			return
		}

		if ok {
			return
		}

		// Now that we know it is not a local close with the latest
		// state, we check if it is the remote that closed with any
		// prior or current state.
		ok, err = c.handleKnownRemoteState(
			commitSpend, broadcastStateNum, chainSet,
		)
		if err != nil {
			log.Errorf("Unable to handle known remote state: %v",
				err)
			return
		}

		if ok {
			return
		}

		// Next, we'll check to see if this is a cooperative channel
		// closure or not. This is characterized by having an input
		// sequence number that's finalized. This won't happen with
		// regular commitment transactions due to the state hint
		// encoding scheme.
		if commitTxBroadcast.TxIn[0].Sequence == wire.MaxTxInSequenceNum {
			// TODO(roasbeef): rare but possible, need itest case
			// for
			err := c.dispatchCooperativeClose(commitSpend)
			if err != nil {
				log.Errorf("unable to handle co-op close: %v", err)
			}
			return
		}

		log.Warnf("Unknown commitment broadcast for "+
			"ChannelPoint(%v) ", c.cfg.chanState.FundingOutpoint)

		// We'll try to recover as best as possible from losing state.
		// We first check if this was a local unknown state. This could
		// happen if we force close, then lose state or attempt
		// recovery before the commitment confirms.
		ok, err = c.handleUnknownLocalState(
			commitSpend, broadcastStateNum, chainSet,
		)
		if err != nil {
			log.Errorf("Unable to handle unknown local state: %v",
				err)
			return
		}

		if ok {
			return
		}

		// Since it was neither a known remote state, nor a local state
		// that was published, it most likely means we lost state and
		// the remote node closed. In this case we must start the DLP
		// protocol in hope of getting our money back.
		ok, err = c.handleUnknownRemoteState(
			commitSpend, broadcastStateNum, chainSet,
		)
		if err != nil {
			log.Errorf("Unable to handle unknown remote state: %v",
				err)
			return
		}

		if ok {
			return
		}

		log.Warnf("Unable to handle spending tx %v of channel point %v",
			commitTxBroadcast.TxHash(), c.cfg.chanState.FundingOutpoint)
		return

	// The chainWatcher has been signalled to exit, so we'll do so now.
	case <-c.quit:
		return
	}
}

// handleKnownLocalState checks whether the passed spend is a local state that
// is known to us (the current state). If so we will act on this state using
// the passed chainSet. If this is not a known local state, false is returned.
func (c *chainWatcher) handleKnownLocalState(
	commitSpend *chainntnfs.SpendDetail, broadcastStateNum uint64,
	chainSet *chainSet) (bool, error) {

	// If the channel is recovered, we won't have a local commit to check
	// against, so immediately return.
	if c.cfg.chanState.HasChanStatus(channeldb.ChanStatusRestored) {
		return false, nil
	}

	commitTxBroadcast := commitSpend.SpendingTx
	commitHash := commitTxBroadcast.TxHash()

	// Check whether our latest local state hit the chain.
	if chainSet.localCommit.CommitTx.TxHash() != commitHash {
		return false, nil
	}

	chainSet.commitSet.ConfCommitKey = &LocalHtlcSet
	if err := c.dispatchLocalForceClose(
		commitSpend, broadcastStateNum, chainSet.commitSet,
	); err != nil {
		return false, fmt.Errorf("unable to handle local "+
			"close for chan_point=%v: %v",
			c.cfg.chanState.FundingOutpoint, err)
	}

	return true, nil
}

// handleKnownRemoteState checks whether the passed spend is a remote state
// that is known to us (a revoked, current or pending state). If so we will act
// on this state using the passed chainSet. If this is not a known remote
// state, false is returned.
func (c *chainWatcher) handleKnownRemoteState(
	commitSpend *chainntnfs.SpendDetail, broadcastStateNum uint64,
	chainSet *chainSet) (bool, error) {

	// If the channel is recovered, we won't have any remote commit to
	// check against, so immediately return.
	if c.cfg.chanState.HasChanStatus(channeldb.ChanStatusRestored) {
		return false, nil
	}

	commitTxBroadcast := commitSpend.SpendingTx
	commitHash := commitTxBroadcast.TxHash()
	spendHeight := uint32(commitSpend.SpendingHeight)

	switch {
	// If the spending transaction matches the current latest state, then
	// they've initiated a unilateral close. So we'll trigger the
	// unilateral close signal so subscribers can clean up the state as
	// necessary.
	case chainSet.remoteCommit.CommitTx.TxHash() == commitHash:
		log.Infof("Remote party broadcast base set, "+
			"commit_num=%v", chainSet.remoteStateNum)

		chainSet.commitSet.ConfCommitKey = &RemoteHtlcSet
		err := c.dispatchRemoteForceClose(
			commitSpend, chainSet.remoteCommit,
			chainSet.commitSet,
			c.cfg.chanState.RemoteCurrentRevocation,
		)
		if err != nil {
			return false, fmt.Errorf("unable to handle remote "+
				"close for chan_point=%v: %v",
				c.cfg.chanState.FundingOutpoint, err)
		}

		return true, nil

	// We'll also handle the case of the remote party broadcasting
	// their commitment transaction which is one height above ours.
	// This case can arise when we initiate a state transition, but
	// the remote party fails or crashes _after_ accepting the new
	// state, but _before_ sending their signature to us.
	case chainSet.remotePendingCommit != nil &&
		chainSet.remotePendingCommit.CommitTx.TxHash() == commitHash:

		log.Infof("Remote party broadcast pending set, "+
			"commit_num=%v", chainSet.remoteStateNum+1)

		chainSet.commitSet.ConfCommitKey = &RemotePendingHtlcSet
		err := c.dispatchRemoteForceClose(
			commitSpend, *chainSet.remotePendingCommit,
			chainSet.commitSet,
			c.cfg.chanState.RemoteNextRevocation,
		)
		if err != nil {
			return false, fmt.Errorf("unable to handle remote "+
				"close for chan_point=%v: %v",
				c.cfg.chanState.FundingOutpoint, err)
		}

		return true, nil
	}

	// We check if we have a revoked state at this state num that matches
	// the spend transaction.
	retribution, err := lnwallet.NewBreachRetribution(
		c.cfg.chanState, broadcastStateNum, spendHeight,
	)

	switch {
	// If we had no log entry at this height, this was not a revoked state.
	case err == channeldb.ErrLogEntryNotFound:
		return false, nil
	case err == channeldb.ErrNoPastDeltas:
		return false, nil

	case err != nil:
		return false, fmt.Errorf("unable to create breach "+
			"retribution: %v", err)
	}

	// We found a revoked state at this height, but it could still be our
	// own broadcast state we are looking at. Therefore check that the
	// commit matches before assuming it was a breach.
	if retribution.BreachTransaction.TxHash() != commitHash {
		return false, nil
	}

	// Create an AnchorResolution for the breached state.
	anchorRes, err := lnwallet.NewAnchorResolution(
		c.cfg.chanState, commitSpend.SpendingTx,
	)
	if err != nil {
		return false, fmt.Errorf("unable to create anchor "+
			"resolution: %v", err)
	}

	// We'll set the ConfCommitKey here as the remote htlc set. This is
	// only used to ensure a nil-pointer dereference doesn't occur and is
	// not used otherwise. The HTLCs may not exist for the
	// RemotePendingHtlcSet.
	chainSet.commitSet.ConfCommitKey = &RemoteHtlcSet

	// THEY'RE ATTEMPTING TO VIOLATE THE CONTRACT LAID OUT WITHIN THE
	// PAYMENT CHANNEL. Therefore we close the signal indicating a revoked
	// broadcast to allow subscribers to swiftly dispatch justice!!!
	err = c.dispatchContractBreach(
		commitSpend, chainSet, broadcastStateNum, retribution,
		anchorRes,
	)
	if err != nil {
		return false, fmt.Errorf("unable to handle channel "+
			"breach for chan_point=%v: %v",
			c.cfg.chanState.FundingOutpoint, err)
	}

	return true, nil
}

// handleUnknownRemoteState is the last attempt we make at reclaiming funds
// from the closed channel, by checking whether the passed spend _could_ be a
// remote spend that is unknown to us (we lost state). We will try to initiate
// Data Loss Protection in order to restore our commit point and reclaim our
// funds from the channel. If we are not able to act on it, false is returned.
func (c *chainWatcher) handleUnknownRemoteState(
	commitSpend *chainntnfs.SpendDetail, broadcastStateNum uint64,
	chainSet *chainSet) (bool, error) {

	log.Warnf("Remote node broadcast state #%v, "+
		"which is more than 1 beyond best known "+
		"state #%v!!! Attempting recovery...",
		broadcastStateNum, chainSet.remoteStateNum)

	// If this isn't a tweakless commitment, then we'll need to wait for
	// the remote party's latest unrevoked commitment point to be presented
	// to us as we need this to sweep. Otherwise, we can dispatch the
	// remote close and sweep immediately using a fake commitPoint as it
	// isn't actually needed for recovery anymore.
	commitPoint := c.cfg.chanState.RemoteCurrentRevocation
	tweaklessCommit := c.cfg.chanState.ChanType.IsTweakless()
	if !tweaklessCommit {
		commitPoint = c.waitForCommitmentPoint()
		if commitPoint == nil {
			return false, fmt.Errorf("unable to get commit point")
		}

		log.Infof("Recovered commit point(%x) for "+
			"channel(%v)! Now attempting to use it to "+
			"sweep our funds...",
			commitPoint.SerializeCompressed(),
			c.cfg.chanState.FundingOutpoint)
	} else {
		log.Infof("ChannelPoint(%v) is tweakless, "+
			"moving to sweep directly on chain",
			c.cfg.chanState.FundingOutpoint)
	}

	// Since we don't have the commitment stored for this state, we'll just
	// pass an empty commitment within the commitment set. Note that this
	// means we won't be able to recover any HTLC funds.
	//
	// TODO(halseth): can we try to recover some HTLCs?
	chainSet.commitSet.ConfCommitKey = &RemoteHtlcSet
	err := c.dispatchRemoteForceClose(
		commitSpend, channeldb.ChannelCommitment{},
		chainSet.commitSet, commitPoint,
	)
	if err != nil {
		return false, fmt.Errorf("unable to handle remote "+
			"close for chan_point=%v: %v",
			c.cfg.chanState.FundingOutpoint, err)
	}

	return true, nil
}

// toSelfAmount takes a transaction and returns the sum of all outputs that pay
// to a script that the wallet controls. If no outputs pay to us, then we
// return zero. This is possible as our output may have been trimmed due to
// being dust.
func (c *chainWatcher) toSelfAmount(tx *wire.MsgTx) dcrutil.Amount {
	var selfAmt dcrutil.Amount
	for _, txOut := range tx.TxOut {
		_, addrs := stdscript.ExtractAddrs(
			// Doesn't matter what net we actually pass in.
			txOut.Version, txOut.PkScript, c.cfg.netParams,
		)

		for _, addr := range addrs {
			if c.cfg.isOurAddr(addr) {
				selfAmt += dcrutil.Amount(txOut.Value)
			}
		}
	}

	return selfAmt
}

// dispatchCooperativeClose processes a detected cooperative channel closure.
// We'll use the spending transaction to locate our output within the
// transaction, then clean up the database state. We'll also dispatch a
// notification to all subscribers that the channel has been closed in this
// manner.
func (c *chainWatcher) dispatchCooperativeClose(commitSpend *chainntnfs.SpendDetail) error {
	broadcastTx := commitSpend.SpendingTx

	log.Infof("Cooperative closure for ChannelPoint(%v): %v",
		c.cfg.chanState.FundingOutpoint, spew.Sdump(broadcastTx))

	// If the input *is* final, then we'll check to see which output is
	// ours.
	localAmt := c.toSelfAmount(broadcastTx)

	// Once this is known, we'll mark the state as fully closed in the
	// database. We can do this as a cooperatively closed channel has all
	// its outputs resolved after only one confirmation.
	closeSummary := &channeldb.ChannelCloseSummary{
		ChanPoint:               c.cfg.chanState.FundingOutpoint,
		ChainHash:               c.cfg.chanState.ChainHash,
		ClosingTXID:             *commitSpend.SpenderTxHash,
		RemotePub:               c.cfg.chanState.IdentityPub,
		Capacity:                c.cfg.chanState.Capacity,
		CloseHeight:             uint32(commitSpend.SpendingHeight),
		SettledBalance:          localAmt,
		CloseType:               channeldb.CooperativeClose,
		ShortChanID:             c.cfg.chanState.ShortChanID(),
		IsPending:               true,
		RemoteCurrentRevocation: c.cfg.chanState.RemoteCurrentRevocation,
		RemoteNextRevocation:    c.cfg.chanState.RemoteNextRevocation,
		LocalChanConfig:         c.cfg.chanState.LocalChanCfg,
	}

	// Attempt to add a channel sync message to the close summary.
	chanSync, err := c.cfg.chanState.ChanSyncMsg()
	if err != nil {
		log.Errorf("ChannelPoint(%v): unable to create channel sync "+
			"message: %v", c.cfg.chanState.FundingOutpoint, err)
	} else {
		closeSummary.LastChanSyncMsg = chanSync
	}

	// Create a summary of all the information needed to handle the
	// cooperative closure.
	closeInfo := &CooperativeCloseInfo{
		ChannelCloseSummary: closeSummary,
	}

	// With the event processed, we'll now notify all subscribers of the
	// event.
	c.Lock()
	for _, sub := range c.clientSubscriptions {
		select {
		case sub.CooperativeClosure <- closeInfo:
		case <-c.quit:
			c.Unlock()
			return fmt.Errorf("exiting")
		}
	}
	c.Unlock()

	return nil
}

// dispatchLocalForceClose processes the confirmation of a unilateral close
// initiated by us.
func (c *chainWatcher) dispatchLocalForceClose(
	commitSpend *chainntnfs.SpendDetail,
	stateNum uint64, commitSet CommitSet) error {

	log.Infof("Local unilateral close of ChannelPoint(%v) "+
		"detected", c.cfg.chanState.FundingOutpoint)

	forceClose, err := lnwallet.NewLocalForceCloseSummary(
		c.cfg.chanState, c.cfg.signer,
		commitSpend.SpendingTx, stateNum,
	)
	if err != nil {
		return err
	}

	// As we've detected that the channel has been closed, we'll
	// immediately create a close summary for future usage by related
	// sub-systems.
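	// The channel snapshot embedded in the force close summary supplies
	// the static metadata (channel point, chain hash, capacity, remote
	// identity) recorded in the close summary below.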
	chanSnapshot := forceClose.ChanSnapshot
	closeSummary := &channeldb.ChannelCloseSummary{
		ChanPoint:               chanSnapshot.ChannelPoint,
		ChainHash:               chanSnapshot.ChainHash,
		ClosingTXID:             forceClose.CloseTx.TxHash(),
		RemotePub:               &chanSnapshot.RemoteIdentity,
		Capacity:                chanSnapshot.Capacity,
		CloseType:               channeldb.LocalForceClose,
		IsPending:               true,
		ShortChanID:             c.cfg.chanState.ShortChanID(),
		CloseHeight:             uint32(commitSpend.SpendingHeight),
		RemoteCurrentRevocation: c.cfg.chanState.RemoteCurrentRevocation,
		RemoteNextRevocation:    c.cfg.chanState.RemoteNextRevocation,
		LocalChanConfig:         c.cfg.chanState.LocalChanCfg,
	}

	// If our commitment output isn't dust or we have active HTLCs on the
	// commitment transaction, then we'll populate the balances on the
	// close channel summary.
	if forceClose.CommitResolution != nil {
		closeSummary.SettledBalance = chanSnapshot.LocalBalance.ToAtoms()
		closeSummary.TimeLockedBalance = chanSnapshot.LocalBalance.ToAtoms()
	}
	for _, htlc := range forceClose.HtlcResolutions.OutgoingHTLCs {
		htlcValue := dcrutil.Amount(htlc.SweepSignDesc.Output.Value)
		closeSummary.TimeLockedBalance += htlcValue
	}

	// Attempt to add a channel sync message to the close summary.
	chanSync, err := c.cfg.chanState.ChanSyncMsg()
	if err != nil {
		log.Errorf("ChannelPoint(%v): unable to create channel sync "+
			"message: %v", c.cfg.chanState.FundingOutpoint, err)
	} else {
		closeSummary.LastChanSyncMsg = chanSync
	}

	// With the event processed, we'll now notify all subscribers of the
	// event.
	closeInfo := &LocalUnilateralCloseInfo{
		SpendDetail:            commitSpend,
		LocalForceCloseSummary: forceClose,
		ChannelCloseSummary:    closeSummary,
		CommitSet:              commitSet,
	}
	c.Lock()
	for _, sub := range c.clientSubscriptions {
		select {
		case sub.LocalUnilateralClosure <- closeInfo:
		case <-c.quit:
			c.Unlock()
			return fmt.Errorf("exiting")
		}
	}
	c.Unlock()

	return nil
}

// dispatchRemoteForceClose processes a detected unilateral channel closure by
// the remote party. This function will prepare a UnilateralCloseSummary which
// will then be sent to any subscribers allowing them to resolve all our funds
// in the channel on chain. Once this close summary is prepared, all registered
// subscribers will receive a notification of this event. The commitPoint
// argument should be set to the per_commitment_point corresponding to the
// spending commitment.
//
// NOTE: The remoteCommit argument should be set to the stored commitment for
// this particular state. If we don't have the commitment stored (should only
// happen in case we have lost state) it should be set to an empty struct, in
// which case we will attempt to sweep the non-HTLC output using the passed
// commitPoint.
func (c *chainWatcher) dispatchRemoteForceClose(
	commitSpend *chainntnfs.SpendDetail,
	remoteCommit channeldb.ChannelCommitment,
	commitSet CommitSet, commitPoint *secp256k1.PublicKey) error {

	log.Infof("Unilateral close of ChannelPoint(%v) "+
		"detected", c.cfg.chanState.FundingOutpoint)

	// First, we'll create a closure summary that contains all the
	// materials required to let each subscriber sweep the funds in the
	// channel on-chain.
	uniClose, err := lnwallet.NewUnilateralCloseSummary(
		c.cfg.chanState, c.cfg.signer, commitSpend,
		remoteCommit, commitPoint,
	)
	if err != nil {
		return err
	}

	// With the event processed, we'll now notify all subscribers of the
	// event.
	c.Lock()
	for _, sub := range c.clientSubscriptions {
		select {
		case sub.RemoteUnilateralClosure <- &RemoteUnilateralCloseInfo{
			UnilateralCloseSummary: uniClose,
			CommitSet:              commitSet,
		}:
		case <-c.quit:
			c.Unlock()
			return fmt.Errorf("exiting")
		}
	}
	c.Unlock()

	return nil
}

// dispatchContractBreach processes a detected contract breach by the remote
// party. This method is to be called once we detect that the remote party has
// broadcast a prior revoked commitment state. This method will prepare all the
// materials required to bring the cheater to justice, then notify all
// registered subscribers of this event.
func (c *chainWatcher) dispatchContractBreach(spendEvent *chainntnfs.SpendDetail,
	chainSet *chainSet, broadcastStateNum uint64,
	retribution *lnwallet.BreachRetribution,
	anchorRes *lnwallet.AnchorResolution) error {

	log.Warnf("Remote peer has breached the channel contract for "+
		"ChannelPoint(%v). Revoked state #%v was broadcast!!!",
		c.cfg.chanState.FundingOutpoint, broadcastStateNum)

	if err := c.cfg.chanState.MarkBorked(); err != nil {
		return fmt.Errorf("unable to mark channel as borked: %v", err)
	}

	spendHeight := uint32(spendEvent.SpendingHeight)

	log.Debugf("Punishment breach retribution created: %v",
		newLogClosure(func() string {
			retribution.KeyRing.LocalHtlcKey = nil
			retribution.KeyRing.RemoteHtlcKey = nil
			retribution.KeyRing.ToLocalKey = nil
			retribution.KeyRing.ToRemoteKey = nil
			retribution.KeyRing.RevocationKey = nil
			return spew.Sdump(retribution)
		}))

	settledBalance := chainSet.remoteCommit.LocalBalance.ToAtoms()
	closeSummary := channeldb.ChannelCloseSummary{
		ChanPoint:               c.cfg.chanState.FundingOutpoint,
		ChainHash:               c.cfg.chanState.ChainHash,
		ClosingTXID:             *spendEvent.SpenderTxHash,
		CloseHeight:             spendHeight,
		RemotePub:               c.cfg.chanState.IdentityPub,
		Capacity:                c.cfg.chanState.Capacity,
		SettledBalance:          settledBalance,
		CloseType:               channeldb.BreachClose,
		IsPending:               true,
		ShortChanID:             c.cfg.chanState.ShortChanID(),
		RemoteCurrentRevocation: c.cfg.chanState.RemoteCurrentRevocation,
		RemoteNextRevocation:    c.cfg.chanState.RemoteNextRevocation,
		LocalChanConfig:         c.cfg.chanState.LocalChanCfg,
	}

	// Attempt to add a channel sync message to the close summary.
	chanSync, err := c.cfg.chanState.ChanSyncMsg()
	if err != nil {
		log.Errorf("ChannelPoint(%v): unable to create channel sync "+
			"message: %v", c.cfg.chanState.FundingOutpoint, err)
	} else {
		closeSummary.LastChanSyncMsg = chanSync
	}

	// Hand the retribution info over to the breach arbiter. This function
	// will wait for a response from the breach arbiter and then proceed to
	// send a BreachCloseInfo to the channel arbitrator. The channel arb
	// will then mark the channel as closed after resolutions and the
	// commit set are logged in the arbitrator log.
	if err := c.cfg.contractBreach(retribution); err != nil {
		log.Errorf("unable to hand breached contract off to "+
			"BreachArbiter: %v", err)
		return err
	}

	breachRes := &BreachResolution{
		FundingOutPoint: c.cfg.chanState.FundingOutpoint,
	}

	breachInfo := &BreachCloseInfo{
		CommitHash:       spendEvent.SpendingTx.TxHash(),
		BreachResolution: breachRes,
		AnchorResolution: anchorRes,
		CommitSet:        chainSet.commitSet,
		CloseSummary:     closeSummary,
	}

	// With the event processed and channel closed, we'll now notify all
	// subscribers of the event.
	c.Lock()
	for _, sub := range c.clientSubscriptions {
		select {
		case sub.ContractBreach <- breachInfo:
		case <-c.quit:
			c.Unlock()
			return fmt.Errorf("quitting")
		}
	}
	c.Unlock()

	return nil
}

// waitForCommitmentPoint waits for the commitment point to be inserted into
// the local database. We'll use this method in the DLP case, to wait for the
// remote party to send us their point, as we can't proceed until we have that.
func (c *chainWatcher) waitForCommitmentPoint() *secp256k1.PublicKey {
	// If we are lucky, the remote peer sent us the correct commitment
	// point during channel sync, such that we can sweep our funds. If we
	// cannot find the commit point, there's not much we can do other than
	// wait for us to retrieve it. We will attempt to retrieve it from the
	// peer each time we connect to it.
	//
	// TODO(halseth): actively initiate re-connection to the peer?
	backoff := minCommitPointPollTimeout
	for {
		commitPoint, err := c.cfg.chanState.DataLossCommitPoint()
		if err == nil {
			return commitPoint
		}

		log.Errorf("Unable to retrieve commitment point for "+
			"channel(%v) with lost state: %v. Retrying in %v.",
			c.cfg.chanState.FundingOutpoint, err, backoff)

		select {
		// Wait before retrying, with an exponential backoff.
		case <-time.After(backoff):
			backoff = 2 * backoff
			if backoff > maxCommitPointPollTimeout {
				backoff = maxCommitPointPollTimeout
			}

		case <-c.quit:
			return nil
		}
	}
}
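
// exampleConsumeChainEvents is an illustrative sketch only (not part of the
// original file): it shows how a subscriber might drain a
// ChainEventSubscription returned by SubscribeChannelEvents. The function
// name and the caller-provided quit channel are assumptions for the example.
func exampleConsumeChainEvents(c *chainWatcher, quit <-chan struct{}) {
	sub := c.SubscribeChannelEvents()
	defer sub.Cancel()

	for {
		select {
		// The remote party's commitment confirmed on-chain.
		case <-sub.RemoteUnilateralClosure:
			log.Infof("remote unilateral close of %v confirmed",
				sub.ChanPoint)

		// Our own commitment confirmed on-chain.
		case closeInfo := <-sub.LocalUnilateralClosure:
			log.Infof("local unilateral close of %v confirmed in %v",
				sub.ChanPoint,
				closeInfo.ChannelCloseSummary.ClosingTXID)

		// A cooperative close confirmed on-chain.
		case closeInfo := <-sub.CooperativeClosure:
			log.Infof("cooperative close of %v confirmed in %v",
				sub.ChanPoint, closeInfo.ClosingTXID)

		// A revoked state confirmed on-chain.
		case breachInfo := <-sub.ContractBreach:
			log.Warnf("breach of %v detected in commitment %v",
				sub.ChanPoint, breachInfo.CommitHash)

		case <-quit:
			return
		}
	}
}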