github.com/decred/dcrlnd@v0.7.6/contractcourt/chain_arbitrator.go

package contractcourt

import (
	"errors"
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	"github.com/btcsuite/btcwallet/walletdb"
	"github.com/decred/dcrd/chaincfg/chainhash"
	"github.com/decred/dcrd/chaincfg/v3"
	"github.com/decred/dcrd/txscript/v4/stdaddr"
	"github.com/decred/dcrd/wire"
	"github.com/decred/dcrlnd/chainntnfs"
	"github.com/decred/dcrlnd/channeldb"
	"github.com/decred/dcrlnd/clock"
	"github.com/decred/dcrlnd/input"
	"github.com/decred/dcrlnd/kvdb"
	"github.com/decred/dcrlnd/labels"
	"github.com/decred/dcrlnd/lnwallet"
	"github.com/decred/dcrlnd/lnwallet/chainfee"
	"github.com/decred/dcrlnd/lnwire"
)

// ErrChainArbExiting signals that the chain arbitrator is shutting down.
var ErrChainArbExiting = errors.New("ChainArbitrator exiting")

// ResolutionMsg is a message sent by resolvers to outside sub-systems once an
// outgoing contract has been fully resolved. For multi-hop contracts, if we
// resolve the outgoing contract, we'll also need to ensure that the incoming
// contract is resolved as well. We package the items required to resolve the
// incoming contracts within this message.
type ResolutionMsg struct {
	// SourceChan identifies the channel that this message is being sent
	// from. This is the channel's short channel ID.
	SourceChan lnwire.ShortChannelID

	// HtlcIndex is the index of the contract within the original
	// commitment trace.
	HtlcIndex uint64

	// Failure will be non-nil if the incoming contract should be canceled
	// altogether. This can happen if the outgoing contract was dust, or
	// if the outgoing HTLC timed out.
	Failure lnwire.FailureMessage

	// PreImage will be non-nil if the incoming contract can successfully
	// be redeemed. This can happen if we learn of the preimage from the
	// outgoing HTLC on-chain.
	PreImage *[32]byte
}
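
// Example (illustrative sketch, not part of the original source): a resolver
// that timed out its outgoing HTLC on-chain could cancel the corresponding
// incoming contract by delivering a message such as the following, where
// sourceChan and htlcIndex are hypothetical values owned by the resolver:
//
//	msg := ResolutionMsg{
//		SourceChan: sourceChan,
//		HtlcIndex:  htlcIndex,
//		Failure:    &lnwire.FailPermanentChannelFailure{},
//	}
//	if err := cfg.DeliverResolutionMsg(msg); err != nil {
//		// The switch could not accept the message; retry or log.
//	}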

// ChainArbitratorConfig is a configuration struct that contains all the
// function closures and interfaces that are required to arbitrate on-chain
// contracts for a particular chain.
type ChainArbitratorConfig struct {
	// ChainHash is the chain that this arbitrator is to operate within.
	ChainHash chainhash.Hash

	// NetParams are the network parameters for the current chain.
	NetParams *chaincfg.Params

	// IncomingBroadcastDelta is the delta that we'll use to decide when to
	// broadcast our commitment transaction if we have incoming htlcs. This
	// value should be set based on our current fee estimation of the
	// commitment transaction. We use this to determine when we should
	// broadcast instead of just the HTLC timeout, as we want to ensure
	// that the commitment transaction is already confirmed by the time the
	// HTLC expires. Otherwise we may end up not settling the htlc
	// on-chain because the other party managed to time it out.
	IncomingBroadcastDelta uint32

	// OutgoingBroadcastDelta is the delta that we'll use to decide when to
	// broadcast our commitment transaction if there are active outgoing
	// htlcs. This value can be lower than the incoming broadcast delta.
	OutgoingBroadcastDelta uint32

	// NewSweepAddr is a function that returns a new address under control
	// of the wallet. We'll use this to sweep any no-delay outputs as a
	// result of unilateral channel closes.
	//
	// NOTE: This SHOULD return a p2wkh script.
	NewSweepAddr func() ([]byte, error)

	// PublishTx reliably broadcasts a transaction to the network. Once
	// this function exits without an error, then the transaction MUST
	// continually be rebroadcast if needed.
	PublishTx func(*wire.MsgTx, string) error

	// DeliverResolutionMsg is a function that will append an outgoing
	// message to the "out box" for a ChannelLink. This is used to cancel
	// backwards, to the incoming link, any HTLCs that are either dust,
	// that we're timing out, or that we're settling on-chain.
	DeliverResolutionMsg func(...ResolutionMsg) error

	// MarkLinkInactive is a function closure that the ChainArbitrator will
	// use to mark that active HTLCs shouldn't be routed over a particular
	// channel. This function will be called when a ChannelArbitrator
	// decides that it needs to go to chain in order to resolve contracts.
	//
	// TODO(roasbeef): rename, routing based
	MarkLinkInactive func(wire.OutPoint) error

	// ContractBreach is a function closure that the ChainArbitrator will
	// use to notify the breachArbiter about a contract breach. It should
	// only return a nil error once the breachArbiter has preserved the
	// necessary breach info for this channel point. Once the breach
	// resolution is persisted in the channel arbitrator, it will be safe
	// to mark the channel closed.
	ContractBreach func(wire.OutPoint, *lnwallet.BreachRetribution) error

	// IsOurAddress is a function that returns true if the passed address
	// is known to the underlying wallet. Otherwise, false should be
	// returned.
	IsOurAddress func(stdaddr.Address) bool

	// IncubateOutputs sends either an incoming HTLC, an outgoing HTLC, or
	// both to the utxo nursery. Once this function returns, the nursery
	// should have safely persisted the outputs to disk, and should start
	// the process of incubation. This is used when a resolver wishes to
	// pass off the output to the nursery as we're only waiting on an
	// absolute/relative timelock.
	IncubateOutputs func(wire.OutPoint, *lnwallet.OutgoingHtlcResolution,
		*lnwallet.IncomingHtlcResolution, uint32) error

	// PreimageDB is a global store of all known pre-images. We'll use this
	// to decide if we should broadcast a commitment transaction to claim
	// an HTLC on-chain.
	PreimageDB WitnessBeacon

	// Notifier is an instance of a chain notifier we'll use to watch for
	// certain on-chain events.
	Notifier chainntnfs.ChainNotifier

	// Signer is a signer backed by the active lnd node. This should be
	// capable of producing a signature as specified by a valid
	// SignDescriptor.
	Signer input.Signer

	// FeeEstimator will be used to return fee estimates.
	FeeEstimator chainfee.Estimator

	// ChainIO allows us to query the state of the current main chain.
	ChainIO lnwallet.BlockChainIO

	// DisableChannel disables a channel, resulting in it not being able to
	// forward payments.
	DisableChannel func(wire.OutPoint) error

	// Sweeper allows resolvers to sweep their final outputs.
	Sweeper UtxoSweeper

	// Registry is the invoice database that is used by resolvers to lookup
	// preimages and settle invoices.
	Registry Registry

	// NotifyClosedChannel is a function closure that the ChainArbitrator
	// will use to notify the ChannelNotifier about a newly closed channel.
	NotifyClosedChannel func(wire.OutPoint)

	// NotifyFullyResolvedChannel is a function closure that the
	// ChainArbitrator will use to notify the ChannelNotifier about a newly
	// resolved channel. The main difference to NotifyClosedChannel is that
	// in the case of a local force close, NotifyClosedChannel is called
	// when the published commitment transaction confirms, while
	// NotifyFullyResolvedChannel is only called when the channel is fully
	// resolved (which includes sweeping any time locked funds).
	NotifyFullyResolvedChannel func(point wire.OutPoint)

	// OnionProcessor is used to decode onion payloads for on-chain
	// resolution.
	OnionProcessor OnionProcessor

	// PaymentsExpirationGracePeriod indicates a time window during which
	// we let the other node cancel an outgoing htlc that our node has
	// initiated and that has timed out.
	PaymentsExpirationGracePeriod time.Duration

	// IsForwardedHTLC checks for a given htlc, identified by channel id
	// and htlcIndex, if it is a forwarded one.
	IsForwardedHTLC func(chanID lnwire.ShortChannelID, htlcIndex uint64) bool

	// Clock is the clock implementation that ChannelArbitrator uses.
	// It is useful for testing.
	Clock clock.Clock

	// SubscribeBreachComplete is used by the breachResolver to register a
	// subscription that notifies when the breach resolution process is
	// complete.
	SubscribeBreachComplete func(op *wire.OutPoint, c chan struct{}) (
		bool, error)
}
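
// Example (illustrative sketch, not part of the original source): wiring up a
// minimal ChainArbitratorConfig. All identifiers on the right hand side
// (chainHash, netParams, wallet, notifier, signer, estimator, chainIO,
// sweeper, registry, onionProc) are hypothetical dependencies the host
// application would already hold:
//
//	cfg := ChainArbitratorConfig{
//		ChainHash:      chainHash,
//		NetParams:      netParams,
//		NewSweepAddr:   wallet.NewSweepAddr,
//		PublishTx:      wallet.PublishTransaction,
//		Notifier:       notifier,
//		Signer:         signer,
//		FeeEstimator:   estimator,
//		ChainIO:        chainIO,
//		Sweeper:        sweeper,
//		Registry:       registry,
//		OnionProcessor: onionProc,
//		Clock:          clock.NewDefaultClock(),
//	}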

// ChainArbitrator is a sub-system that oversees the on-chain resolution of
// all active channels, and also those that are in the "pending close" state.
// Within the contractcourt package, the ChainArbitrator manages a set of
// active ContractArbitrators. Each ContractArbitrator is responsible for
// watching the chain for any activity that affects the state of the channel,
// and also for monitoring each contract in order to determine if any on-chain
// activity is required. Outside sub-systems interact with the ChainArbitrator
// in order to forcibly exit a contract, update the set of live signals for
// each contract, and to receive reports on the state of contract resolution.
type ChainArbitrator struct {
	started int32 // To be used atomically.
	stopped int32 // To be used atomically.

	sync.Mutex

	// activeChannels is a map of all the active contracts that are still
	// open, and not fully resolved.
	activeChannels map[wire.OutPoint]*ChannelArbitrator

	// activeWatchers is a map of all the active chainWatchers for channels
	// that are still considered open.
	activeWatchers map[wire.OutPoint]*chainWatcher

	// cfg is the config struct for the arbitrator that contains all
	// methods and interfaces it needs to operate.
	cfg ChainArbitratorConfig

	// chanSource will be used by the ChainArbitrator to fetch all the
	// active channels that it must still watch over.
	chanSource *channeldb.DB

	quit chan struct{}

	wg sync.WaitGroup
}

// NewChainArbitrator returns a new instance of the ChainArbitrator using the
// passed config struct, and backing persistent database.
func NewChainArbitrator(cfg ChainArbitratorConfig,
	db *channeldb.DB) *ChainArbitrator {

	return &ChainArbitrator{
		cfg:            cfg,
		activeChannels: make(map[wire.OutPoint]*ChannelArbitrator),
		activeWatchers: make(map[wire.OutPoint]*chainWatcher),
		chanSource:     db,
		quit:           make(chan struct{}),
	}
}
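
// Example (illustrative sketch, not part of the original source): typical
// lifecycle management by the host application, where cfg and db are the
// previously constructed config and channel database:
//
//	chainArb := NewChainArbitrator(cfg, db)
//	if err := chainArb.Start(); err != nil {
//		// Startup failed; Start has already stopped any goroutines
//		// it launched.
//	}
//	defer func() {
//		if err := chainArb.Stop(); err != nil {
//			// Log the shutdown error.
//		}
//	}()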

// arbChannel is a wrapper around an open channel that channel arbitrators
// interact with.
type arbChannel struct {
	// channel is the in-memory channel state.
	channel *channeldb.OpenChannel

	// c references the chain arbitrator and is used by arbChannel
	// internally.
	c *ChainArbitrator
}

// NewAnchorResolutions returns the anchor resolutions for currently valid
// commitment transactions.
//
// NOTE: Part of the ArbChannel interface.
func (a *arbChannel) NewAnchorResolutions() (*lnwallet.AnchorResolutions,
	error) {

	// Get a fresh copy of the database state to base the anchor
	// resolutions on. Unfortunately the channel instance that we have here
	// isn't the same instance that is used by the link.
	chanPoint := a.channel.FundingOutpoint

	channel, err := a.c.chanSource.ChannelStateDB().FetchChannel(
		nil, chanPoint,
	)
	if err != nil {
		return nil, err
	}

	chanMachine, err := lnwallet.NewLightningChannel(
		a.c.cfg.Signer, channel, nil, a.c.cfg.NetParams,
	)
	if err != nil {
		return nil, err
	}

	return chanMachine.NewAnchorResolutions()
}

// ForceCloseChan should force close the contract that this attendant is
// watching over. We'll use this when we decide that we need to go to chain.
// It should in addition tell the switch to remove the corresponding link,
// such that we won't accept any new updates. The returned summary contains
// all items needed to eventually resolve all outputs on chain.
//
// NOTE: Part of the ArbChannel interface.
func (a *arbChannel) ForceCloseChan() (*lnwallet.LocalForceCloseSummary, error) {
	// First, we mark the channel as borked. This ensures that no new state
	// transitions can happen, and also that the link won't be loaded into
	// the switch.
	if err := a.channel.MarkBorked(); err != nil {
		return nil, err
	}

	// With the channel marked as borked, we'll now remove the link from
	// the switch if it's there. If the link is active, then this method
	// will block until it exits.
	chanPoint := a.channel.FundingOutpoint

	if err := a.c.cfg.MarkLinkInactive(chanPoint); err != nil {
		log.Errorf("unable to mark link inactive: %v", err)
	}

	// Now that we know the link can't mutate the channel state, we'll
	// read the target channel from disk according to its channel point.
	channel, err := a.c.chanSource.ChannelStateDB().FetchChannel(
		nil, chanPoint,
	)
	if err != nil {
		return nil, err
	}

	// Finally, we'll force close the channel, completing the force close
	// workflow.
	chanMachine, err := lnwallet.NewLightningChannel(
		a.c.cfg.Signer, channel, nil, a.c.cfg.NetParams,
	)
	if err != nil {
		return nil, err
	}
	return chanMachine.ForceClose()
}
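
// Example (illustrative sketch, not part of the original source): a caller
// holding an ArbChannel could fetch the anchor resolutions and hand any
// available ones to the sweeper for fee bumping (CPFP); the fields of the
// returned lnwallet.AnchorResolutions struct are elided here:
//
//	resolutions, err := arbCh.NewAnchorResolutions()
//	if err != nil {
//		return err
//	}
//	// Offer each available anchor resolution to the sweeper so the
//	// relevant commitment transaction can be fee bumped if needed.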

// newActiveChannelArbitrator creates a new instance of an active channel
// arbitrator given the state of the target channel.
func newActiveChannelArbitrator(channel *channeldb.OpenChannel,
	c *ChainArbitrator, chanEvents *ChainEventSubscription) (*ChannelArbitrator, error) {

	log.Tracef("Creating ChannelArbitrator for ChannelPoint(%v)",
		channel.FundingOutpoint)

	// TODO(roasbeef): fetch best height (or pass in) so can ensure block
	// epoch delivers all the notifications to

	chanPoint := channel.FundingOutpoint

	// Next we'll create the matching configuration struct that contains
	// all interfaces and methods the arbitrator needs to do its job.
	arbCfg := ChannelArbitratorConfig{
		ChanPoint:   chanPoint,
		Channel:     c.getArbChannel(channel),
		ShortChanID: channel.ShortChanID(),

		MarkCommitmentBroadcasted: channel.MarkCommitmentBroadcasted,
		MarkChannelClosed: func(summary *channeldb.ChannelCloseSummary,
			statuses ...channeldb.ChannelStatus) error {

			err := channel.CloseChannel(summary, statuses...)
			if err != nil {
				return err
			}
			c.cfg.NotifyClosedChannel(summary.ChanPoint)
			return nil
		},
		IsPendingClose:        false,
		ChainArbitratorConfig: c.cfg,
		ChainEvents:           chanEvents,
		PutResolverReport: func(tx kvdb.RwTx,
			report *channeldb.ResolverReport) error {

			return c.chanSource.PutResolverReport(
				tx, c.cfg.ChainHash, &channel.FundingOutpoint,
				report,
			)
		},
		FetchHistoricalChannel: func() (*channeldb.OpenChannel, error) {
			chanStateDB := c.chanSource.ChannelStateDB()
			return chanStateDB.FetchHistoricalChannel(&chanPoint)
		},
	}

	// The final component needed is an arbitrator log that the arbitrator
	// will use to keep track of its internal state using a backing
	// persistent log.
	//
	// TODO(roasbeef): abstraction leak...
	//  * rework: adaptor method to set log scope w/ factory func
	chanLog, err := newBoltArbitratorLog(
		c.chanSource.Backend, arbCfg, c.cfg.ChainHash, chanPoint,
	)
	if err != nil {
		return nil, err
	}

	arbCfg.MarkChannelResolved = func() error {
		if c.cfg.NotifyFullyResolvedChannel != nil {
			c.cfg.NotifyFullyResolvedChannel(chanPoint)
		}

		return c.ResolveContract(chanPoint)
	}

	// Finally, we'll need to construct a series of HTLC sets based on all
	// currently known valid commitments.
	htlcSets := make(map[HtlcSetKey]htlcSet)
	htlcSets[LocalHtlcSet] = newHtlcSet(channel.LocalCommitment.Htlcs)
	htlcSets[RemoteHtlcSet] = newHtlcSet(channel.RemoteCommitment.Htlcs)

	pendingRemoteCommitment, err := channel.RemoteCommitChainTip()
	if err != nil && err != channeldb.ErrNoPendingCommit {
		return nil, err
	}
	if pendingRemoteCommitment != nil {
		htlcSets[RemotePendingHtlcSet] = newHtlcSet(
			pendingRemoteCommitment.Commitment.Htlcs,
		)
	}

	return NewChannelArbitrator(
		arbCfg, htlcSets, chanLog,
	), nil
}

// getArbChannel returns an open channel wrapper for use by channel
// arbitrators.
func (c *ChainArbitrator) getArbChannel(
	channel *channeldb.OpenChannel) *arbChannel {

	return &arbChannel{
		channel: channel,
		c:       c,
	}
}

// ResolveContract marks a contract as fully resolved within the database.
// This is only to be done once all contracts which were live on the channel
// before hitting the chain have been resolved.
func (c *ChainArbitrator) ResolveContract(chanPoint wire.OutPoint) error {

	log.Infof("Marking ChannelPoint(%v) fully resolved", chanPoint)

	// First, we'll mark the channel as fully closed from the PoV of the
	// channel source.
	err := c.chanSource.ChannelStateDB().MarkChanFullyClosed(&chanPoint)
	if err != nil {
		log.Errorf("ChainArbitrator: unable to mark ChannelPoint(%v) "+
			"fully closed: %v", chanPoint, err)
		return err
	}

	// Now that the channel has been marked as fully closed, we'll stop
	// both the channel arbitrator and chain watcher for this channel if
	// they're still active.
	var arbLog ArbitratorLog
	c.Lock()
	chainArb := c.activeChannels[chanPoint]
	delete(c.activeChannels, chanPoint)

	chainWatcher := c.activeWatchers[chanPoint]
	delete(c.activeWatchers, chanPoint)
	c.Unlock()

	if chainArb != nil {
		arbLog = chainArb.log

		if err := chainArb.Stop(); err != nil {
			log.Warnf("unable to stop ChannelArbitrator(%v): %v",
				chanPoint, err)
		}
	}
	if chainWatcher != nil {
		if err := chainWatcher.Stop(); err != nil {
			log.Warnf("unable to stop ChainWatcher(%v): %v",
				chanPoint, err)
		}
	}

	// Once this has been marked as resolved, we'll wipe the log that the
	// channel arbitrator was using to store its persistent state. We do
	// this after marking the channel resolved, as otherwise, the
	// arbitrator would be re-created, and think it was starting from the
	// default state.
	if arbLog != nil {
		if err := arbLog.WipeHistory(); err != nil {
			return err
		}
	}

	return nil
}

// Start launches all goroutines that the ChainArbitrator needs to operate.
func (c *ChainArbitrator) Start() error {
	if !atomic.CompareAndSwapInt32(&c.started, 0, 1) {
		return nil
	}

	log.Tracef("Starting ChainArbitrator")

	// First, we'll fetch all the channels that are still open, in order to
	// collect them within our set of active contracts.
	openChannels, err := c.chanSource.ChannelStateDB().FetchAllChannels()
	if err != nil {
		return err
	}

	if len(openChannels) > 0 {
		log.Infof("Creating ChannelArbitrators for %v active channels",
			len(openChannels))
	}

	// For each open channel, we'll configure, then launch a corresponding
	// ChannelArbitrator.
	for _, channel := range openChannels {
		chanPoint := channel.FundingOutpoint
		channel := channel

		// First, we'll create an active chainWatcher for this channel
		// to ensure that we detect any relevant on-chain events.
		breachClosure := func(ret *lnwallet.BreachRetribution) error {
			return c.cfg.ContractBreach(chanPoint, ret)
		}

		chainWatcher, err := newChainWatcher(
			chainWatcherConfig{
				chanState:           channel,
				notifier:            c.cfg.Notifier,
				signer:              c.cfg.Signer,
				isOurAddr:           c.cfg.IsOurAddress,
				contractBreach:      breachClosure,
				extractStateNumHint: lnwallet.GetStateNumHint,
				netParams:           c.cfg.NetParams,
			},
		)
		if err != nil {
			return err
		}

		c.activeWatchers[chanPoint] = chainWatcher
		channelArb, err := newActiveChannelArbitrator(
			channel, c, chainWatcher.SubscribeChannelEvents(),
		)
		if err != nil {
			return err
		}

		c.activeChannels[chanPoint] = channelArb

		// Republish any closing transactions for this channel.
		err = c.publishClosingTxs(channel)
		if err != nil {
			return err
		}
	}

	// In addition to the channels that we know to be open, we'll also
	// launch arbitrators to finish resolving any channels that are in
	// the pending close state.
	closingChannels, err := c.chanSource.ChannelStateDB().FetchClosedChannels(
		true,
	)
	if err != nil {
		return err
	}

	if len(closingChannels) > 0 {
		log.Infof("Creating ChannelArbitrators for %v closing channels",
			len(closingChannels))
	}

	// Next, for each channel in the closing state, we'll launch a
	// corresponding more restricted resolver, as we don't have to watch
	// the chain any longer, only resolve the contracts on the confirmed
	// commitment.
	for _, closeChanInfo := range closingChannels {
		// We can leave off the CloseContract and ForceCloseChan
		// methods as the channel is already closed at this point.
		chanPoint := closeChanInfo.ChanPoint
		arbCfg := ChannelArbitratorConfig{
			ChanPoint:             chanPoint,
			ShortChanID:           closeChanInfo.ShortChanID,
			ChainArbitratorConfig: c.cfg,
			ChainEvents:           &ChainEventSubscription{},
			IsPendingClose:        true,
			ClosingHeight:         closeChanInfo.CloseHeight,
			CloseType:             closeChanInfo.CloseType,
			PutResolverReport: func(tx kvdb.RwTx,
				report *channeldb.ResolverReport) error {

				return c.chanSource.PutResolverReport(
					tx, c.cfg.ChainHash, &chanPoint, report,
				)
			},
			FetchHistoricalChannel: func() (*channeldb.OpenChannel, error) {
				chanStateDB := c.chanSource.ChannelStateDB()
				return chanStateDB.FetchHistoricalChannel(&chanPoint)
			},
		}
		chanLog, err := newBoltArbitratorLog(
			c.chanSource.Backend, arbCfg, c.cfg.ChainHash, chanPoint,
		)
		if err != nil {
			return err
		}
		arbCfg.MarkChannelResolved = func() error {
			if c.cfg.NotifyFullyResolvedChannel != nil {
				c.cfg.NotifyFullyResolvedChannel(chanPoint)
			}

			return c.ResolveContract(chanPoint)
		}

		// We can also leave off the set of HTLCs here, since the
		// channel is already in the process of being fully resolved;
		// no new HTLCs will be added.
		c.activeChannels[chanPoint] = NewChannelArbitrator(
			arbCfg, nil, chanLog,
		)
	}

	// Now, we'll start all chain watchers in parallel to shorten startup
	// duration. In neutrino mode, this allows spend registrations to take
	// advantage of batch spend reporting, instead of doing a single rescan
	// per chain watcher.
	//
	// NOTE: After this point, we Stop the chain arb on any failure to
	// ensure that any lingering goroutines are cleaned up before exiting.
	watcherErrs := make(chan error, len(c.activeWatchers))
	var wg sync.WaitGroup
	for _, watcher := range c.activeWatchers {
		wg.Add(1)
		go func(w *chainWatcher) {
			defer wg.Done()
			select {
			case watcherErrs <- w.Start():
			case <-c.quit:
				watcherErrs <- ErrChainArbExiting
			}
		}(watcher)
	}

	// Once all chain watchers have been started, seal the err chan to
	// signal the end of the err stream.
	go func() {
		wg.Wait()
		close(watcherErrs)
	}()

	// stopAndLog is a helper function which shuts down the chain arb and
	// logs errors if they occur.
	stopAndLog := func() {
		if err := c.Stop(); err != nil {
			log.Errorf("ChainArbitrator could not shut down: %v", err)
		}
	}

	// Handle all errors returned from spawning our chain watchers. If any
	// of them failed, we will stop the chain arb to shut down any active
	// goroutines.
	for err := range watcherErrs {
		if err != nil {
			stopAndLog()
			return err
		}
	}

	// Before we start all of our arbitrators, we do a preliminary state
	// lookup so that we can combine all of these lookups in a single db
	// transaction.
	var startStates map[wire.OutPoint]*chanArbStartState

	err = kvdb.View(c.chanSource, func(tx walletdb.ReadTx) error {
		for _, arbitrator := range c.activeChannels {
			startState, err := arbitrator.getStartState(tx)
			if err != nil {
				return err
			}

			startStates[arbitrator.cfg.ChanPoint] = startState
		}

		return nil
	}, func() {
		startStates = make(
			map[wire.OutPoint]*chanArbStartState,
			len(c.activeChannels),
		)
	})
	if err != nil {
		stopAndLog()
		return err
	}

	// Launch all the goroutines for each arbitrator so they can carry out
	// their duties.
	for _, arbitrator := range c.activeChannels {
		startState, ok := startStates[arbitrator.cfg.ChanPoint]
		if !ok {
			stopAndLog()
			return fmt.Errorf("arbitrator: %v has no start state",
				arbitrator.cfg.ChanPoint)
		}

		if err := arbitrator.Start(startState); err != nil {
			stopAndLog()
			return err
		}
	}

	// Subscribe to a single stream of block epoch notifications that we
	// will dispatch to all active arbitrators.
	blockEpoch, err := c.cfg.Notifier.RegisterBlockEpochNtfn(nil)
	if err != nil {
		return err
	}

	// Start our goroutine which will dispatch blocks to each arbitrator.
	c.wg.Add(1)
	go func() {
		defer c.wg.Done()
		c.dispatchBlocks(blockEpoch)
	}()

	// TODO(roasbeef): eventually move all breach watching here

	return nil
}

// blockRecipient contains the information we need to dispatch a block to a
// channel arbitrator.
type blockRecipient struct {
	// chanPoint is the funding outpoint of the channel.
	chanPoint wire.OutPoint

	// blocks is the channel that new block heights are sent into. This
	// channel should be sufficiently buffered as to not block the sender.
	blocks chan<- int32

	// quit is closed if the receiving entity is shutting down.
	quit chan struct{}
}
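
// Example (illustrative sketch, not part of the original source): the
// receiving side of a blockRecipient, as a channel arbitrator's main loop
// might consume it. The blocks and quit channels correspond to the fields
// above, seen from the receiver's end:
//
//	for {
//		select {
//		case height, ok := <-blocks:
//			if !ok {
//				// The dispatcher has shut down.
//				return
//			}
//			// React to the new best height, e.g. check HTLC
//			// deadlines against the broadcast deltas.
//		case <-quit:
//			return
//		}
//	}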

// dispatchBlocks consumes a block epoch notification stream and dispatches
// blocks to each of the chain arb's active channel arbitrators. This function
// must be run in a goroutine.
func (c *ChainArbitrator) dispatchBlocks(
	blockEpoch *chainntnfs.BlockEpochEvent) {

	// getRecipients is a helper function which acquires the chain arb
	// lock and returns a set of block recipients which can be used to
	// dispatch blocks.
	getRecipients := func() []blockRecipient {
		c.Lock()
		blocks := make([]blockRecipient, 0, len(c.activeChannels))
		for _, channel := range c.activeChannels {
			blocks = append(blocks, blockRecipient{
				chanPoint: channel.cfg.ChanPoint,
				blocks:    channel.blocks,
				quit:      channel.quit,
			})
		}
		c.Unlock()

		return blocks
	}

	// On exit, cancel our blocks subscription and close each block channel
	// so that the arbitrators know they will no longer be receiving blocks.
	defer func() {
		blockEpoch.Cancel()

		recipients := getRecipients()
		for _, recipient := range recipients {
			close(recipient.blocks)
		}
	}()

	// Consume block epochs until we receive the instruction to shutdown.
	for {
		select {
		// Consume block epochs, exiting if our subscription is
		// terminated.
		case block, ok := <-blockEpoch.Epochs:
			if !ok {
				log.Trace("dispatchBlocks block epoch " +
					"cancelled")
				return
			}

			// Get the set of currently active channels' block
			// subscription channels and dispatch the block to
			// each.
			for _, recipient := range getRecipients() {
				select {
				// Deliver the block to the arbitrator.
				case recipient.blocks <- block.Height:

				// If the recipient is shutting down, exit
				// without delivering the block. This may be
				// the case when two blocks are mined in quick
				// succession, and the arbitrator resolves
				// after the first block, and does not need to
				// consume the second block.
				case <-recipient.quit:
					log.Debugf("channel: %v exit without "+
						"receiving block: %v",
						recipient.chanPoint,
						block.Height)

				// If the chain arb is shutting down, we don't
				// need to deliver any more blocks (everything
				// will be shutting down).
				case <-c.quit:
					return
				}
			}

		// Exit if the chain arbitrator is shutting down.
		case <-c.quit:
			return
		}
	}
}

// publishClosingTxs will load any stored cooperative or unilateral closing
// transactions and republish them. This helps ensure propagation of the
// transactions in the event that prior publications failed.
func (c *ChainArbitrator) publishClosingTxs(
	channel *channeldb.OpenChannel) error {

	// If the channel has had its unilateral close broadcast already,
	// republish it in case it didn't propagate.
	if channel.HasChanStatus(channeldb.ChanStatusCommitBroadcasted) {
		err := c.rebroadcast(
			channel, channeldb.ChanStatusCommitBroadcasted,
		)
		if err != nil {
			return err
		}
	}

	// If the channel has had its cooperative close broadcast already,
	// republish it in case it didn't propagate.
	if channel.HasChanStatus(channeldb.ChanStatusCoopBroadcasted) {
		err := c.rebroadcast(
			channel, channeldb.ChanStatusCoopBroadcasted,
		)
		if err != nil {
			return err
		}
	}

	return nil
}

// rebroadcast is a helper method which will republish the unilateral or
// cooperative close transaction of a channel in a particular state.
//
// NOTE: There is no risk to calling this method if the channel isn't in
// either CommitmentBroadcasted or CoopBroadcasted state, but the logs will
// be misleading.
func (c *ChainArbitrator) rebroadcast(channel *channeldb.OpenChannel,
	state channeldb.ChannelStatus) error {

	chanPoint := channel.FundingOutpoint

	var (
		closeTx *wire.MsgTx
		kind    string
		err     error
	)
	switch state {
	case channeldb.ChanStatusCommitBroadcasted:
		kind = "force"
		closeTx, err = channel.BroadcastedCommitment()

	case channeldb.ChanStatusCoopBroadcasted:
		kind = "coop"
		closeTx, err = channel.BroadcastedCooperative()

	default:
		return fmt.Errorf("unknown closing state: %v", state)
	}

	switch {

	// This can happen for channels that had their closing tx published
	// before we started storing it to disk.
	case err == channeldb.ErrNoCloseTx:
		log.Warnf("Channel %v is in state %v, but no %s closing tx "+
			"to re-publish...", chanPoint, state, kind)
		return nil

	case err != nil:
		return err
	}

	log.Infof("Re-publishing %s close tx(%v) for channel %v",
		kind, closeTx.TxHash(), chanPoint)

	label := labels.MakeLabel(
		labels.LabelTypeChannelClose, &channel.ShortChannelID,
	)
	err = c.cfg.PublishTx(closeTx, label)
	if err != nil && err != lnwallet.ErrDoubleSpend {
		log.Warnf("Unable to broadcast %s close tx(%v): %v",
			kind, closeTx.TxHash(), err)
	}

	return nil
}

// Stop signals the ChainArbitrator to trigger a graceful shutdown. Any active
// channel arbitrators will be signalled to exit, and this method will block
// until they've all exited.
func (c *ChainArbitrator) Stop() error {
	if !atomic.CompareAndSwapInt32(&c.stopped, 0, 1) {
		return nil
	}

	log.Info("ChainArbitrator shutting down")

	close(c.quit)

	var (
		activeWatchers = make(map[wire.OutPoint]*chainWatcher)
		activeChannels = make(map[wire.OutPoint]*ChannelArbitrator)
	)

	// Copy the current set of active watchers and arbitrators to shutdown.
	// We don't want to hold the lock when shutting down each watcher or
	// arbitrator individually, as they may need to acquire this mutex.
	c.Lock()
	for chanPoint, watcher := range c.activeWatchers {
		activeWatchers[chanPoint] = watcher
	}
	for chanPoint, arbitrator := range c.activeChannels {
		activeChannels[chanPoint] = arbitrator
	}
	c.Unlock()

	for chanPoint, watcher := range activeWatchers {
		log.Tracef("Attempting to stop ChainWatcher(%v)",
			chanPoint)

		if err := watcher.Stop(); err != nil {
			log.Errorf("unable to stop watcher for "+
				"ChannelPoint(%v): %v", chanPoint, err)
		}
	}
	for chanPoint, arbitrator := range activeChannels {
		log.Tracef("Attempting to stop ChannelArbitrator(%v)",
			chanPoint)

		if err := arbitrator.Stop(); err != nil {
			log.Errorf("unable to stop arbitrator for "+
				"ChannelPoint(%v): %v", chanPoint, err)
		}
	}

	c.wg.Wait()

	return nil
}

// ContractUpdate is a message that packages the latest set of active HTLCs on
// a commitment, and also identifies which commitment received a new set of
// HTLCs.
type ContractUpdate struct {
	// HtlcKey identifies which commitment the HTLCs below are present on.
	HtlcKey HtlcSetKey

	// Htlcs are the set of active HTLCs on the commitment identified by
	// the above HtlcKey.
	Htlcs []channeldb.HTLC
}
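
// Example (illustrative sketch, not part of the original source): a link
// reporting that its local commitment now carries a new set of HTLCs, where
// htlcs is a hypothetical []channeldb.HTLC taken from the latest commitment:
//
//	update := &ContractUpdate{
//		HtlcKey: LocalHtlcSet,
//		Htlcs:   htlcs,
//	}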

// ContractSignals wraps the two signals that affect the state of a channel
// being watched by an arbitrator. The two signals we care about are: the
// channel has a new set of HTLCs, and the remote party has just broadcast
// their version of the commitment transaction.
type ContractSignals struct {
	// HtlcUpdates is a channel that the link will use to update the
	// designated channel arbitrator when the set of HTLCs on any valid
	// commitment changes.
	HtlcUpdates chan *ContractUpdate

	// ShortChanID is the up to date short channel ID for a contract. This
	// can change either if the contract didn't yet have a stable
	// identifier when it was added, or in the case of a reorg.
	ShortChanID lnwire.ShortChannelID
}

// UpdateContractSignals sends a set of active, up to date contract signals to
// the ChannelArbitrator which has been assigned to the channel identified by
// the passed channel point.
func (c *ChainArbitrator) UpdateContractSignals(chanPoint wire.OutPoint,
	signals *ContractSignals) error {

	log.Infof("Attempting to update ContractSignals for ChannelPoint(%v)",
		chanPoint)

	c.Lock()
	arbitrator, ok := c.activeChannels[chanPoint]
	c.Unlock()
	if !ok {
		return fmt.Errorf("unable to find arbitrator")
	}

	arbitrator.UpdateContractSignals(signals)

	return nil
}
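
// Example (illustrative sketch, not part of the original source): once a
// channel's link comes online, it could hand its update channel and current
// short channel ID to the arbitrator. htlcUpdates and shortChanID are
// hypothetical values owned by the link:
//
//	signals := &ContractSignals{
//		HtlcUpdates: htlcUpdates,
//		ShortChanID: shortChanID,
//	}
//	if err := chainArb.UpdateContractSignals(chanPoint, signals); err != nil {
//		// The channel has no active arbitrator.
//	}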

// GetChannelArbitrator safely returns the channel arbitrator for a given
// channel outpoint.
func (c *ChainArbitrator) GetChannelArbitrator(chanPoint wire.OutPoint) (
	*ChannelArbitrator, error) {

	c.Lock()
	arbitrator, ok := c.activeChannels[chanPoint]
	c.Unlock()
	if !ok {
		return nil, fmt.Errorf("unable to find arbitrator")
	}

	return arbitrator, nil
}

// forceCloseReq is a request sent from an outside sub-system to the arbitrator
// that watches a particular channel to broadcast the commitment transaction,
// and enter the resolution phase of the channel.
type forceCloseReq struct {
	// errResp is a channel that will be sent upon either in the case of
	// force close success (nil error), or in the case of an error.
	//
	// NOTE: This channel MUST be buffered.
	errResp chan error

	// closeTx is a channel that carries the transaction which ultimately
	// closed out the channel.
	closeTx chan *wire.MsgTx
}

// ForceCloseContract attempts to force close the channel identified by the
// passed channel point. A force close will immediately terminate the
// contract, causing it to enter the resolution phase. If the force close was
// successful, then the force close transaction itself will be returned.
//
// TODO(roasbeef): just return the summary itself?
func (c *ChainArbitrator) ForceCloseContract(chanPoint wire.OutPoint) (*wire.MsgTx, error) {
	c.Lock()
	arbitrator, ok := c.activeChannels[chanPoint]
	c.Unlock()
	if !ok {
		return nil, fmt.Errorf("unable to find arbitrator")
	}

	log.Infof("Attempting to force close ChannelPoint(%v)", chanPoint)

	// Before closing, we'll attempt to send a disable update for the
	// channel. We do so before closing the channel as otherwise the
	// current edge policy won't be retrievable from the graph.
	if err := c.cfg.DisableChannel(chanPoint); err != nil {
		log.Warnf("Unable to disable channel %v on "+
			"close: %v", chanPoint, err)
	}

	errChan := make(chan error, 1)
	respChan := make(chan *wire.MsgTx, 1)

	// With the channel found, and the request crafted, we'll send over a
	// force close request to the arbitrator that watches this channel.
	select {
	case arbitrator.forceCloseReqs <- &forceCloseReq{
		errResp: errChan,
		closeTx: respChan,
	}:
	case <-c.quit:
		return nil, ErrChainArbExiting
	}

	// We'll await two responses: the error response, and the transaction
	// that closed out the channel.
	select {
	case err := <-errChan:
		if err != nil {
			return nil, err
		}
	case <-c.quit:
		return nil, ErrChainArbExiting
	}

	var closeTx *wire.MsgTx
	select {
	case closeTx = <-respChan:
	case <-c.quit:
		return nil, ErrChainArbExiting
	}

	return closeTx, nil
}

// WatchNewChannel sends the ChainArbitrator a message to create a
// ChannelArbitrator tasked with watching over a new channel. Once a new
// channel has finished its final funding flow, it should be registered with
// the ChainArbitrator so we can properly react to any on-chain events.
func (c *ChainArbitrator) WatchNewChannel(newChan *channeldb.OpenChannel) error {
	c.Lock()
	defer c.Unlock()

	log.Infof("Creating new ChannelArbitrator for ChannelPoint(%v)",
		newChan.FundingOutpoint)

	// If we're already watching this channel, then we'll ignore this
	// request.
	chanPoint := newChan.FundingOutpoint
	if _, ok := c.activeChannels[chanPoint]; ok {
		return nil
	}

	// First, we'll create an active chainWatcher for this channel to
	// ensure that we detect any relevant on-chain events.
	chainWatcher, err := newChainWatcher(
		chainWatcherConfig{
			chanState: newChan,
			notifier:  c.cfg.Notifier,
			signer:    c.cfg.Signer,
			isOurAddr: c.cfg.IsOurAddress,
			contractBreach: func(
				retInfo *lnwallet.BreachRetribution) error {

				return c.cfg.ContractBreach(
					chanPoint, retInfo,
				)
			},
			extractStateNumHint: lnwallet.GetStateNumHint,
			netParams:           c.cfg.NetParams,
		},
	)
	if err != nil {
		return err
	}

	c.activeWatchers[newChan.FundingOutpoint] = chainWatcher

	// We'll also create a new channel arbitrator instance using this new
	// channel, and our internal state.
	channelArb, err := newActiveChannelArbitrator(
		newChan, c, chainWatcher.SubscribeChannelEvents(),
	)
	if err != nil {
		return err
	}

	// With the arbitrator created, we'll add it to our set of active
	// arbitrators, then launch it.
	c.activeChannels[chanPoint] = channelArb

	if err := channelArb.Start(nil); err != nil {
		return err
	}

	return chainWatcher.Start()
}

// SubscribeChannelEvents returns a new active subscription for the set of
// possible on-chain events for a particular channel. The struct can be used
// by callers to be notified whenever an event that changes the state of the
// channel on-chain occurs.
func (c *ChainArbitrator) SubscribeChannelEvents(
	chanPoint wire.OutPoint) (*ChainEventSubscription, error) {

	// First, we'll attempt to look up the active watcher for this channel.
	// If we can't find it, then we'll return an error back to the caller.
	watcher, ok := c.activeWatchers[chanPoint]
	if !ok {
		return nil, fmt.Errorf("unable to find watcher for: %v",
			chanPoint)
	}

	// With the watcher located, we'll request for it to create a new chain
	// event subscription client.
	return watcher.SubscribeChannelEvents(), nil
}

// TODO(roasbeef): arbitration reports
//  * types: contested, waiting for success conf, etc
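
// Example (illustrative sketch, not part of the original source): an outside
// sub-system, such as an RPC server, force closing a channel and later
// registering a new one once its funding flow completes. chainArb, chanPoint
// and newChan are hypothetical values held by the caller:
//
//	closeTx, err := chainArb.ForceCloseContract(chanPoint)
//	if err != nil {
//		// The arbitrator could not be found, or the close failed.
//	}
//	log.Infof("force close tx: %v", closeTx.TxHash())
//
//	// Later, after a new channel finishes its funding flow:
//	if err := chainArb.WatchNewChannel(newChan); err != nil {
//		// The new channel could not be registered for watching.
//	}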