github.com/decred/dcrlnd@v0.7.6/htlcswitch/mailbox.go

package htlcswitch

import (
	"bytes"
	"container/list"
	"errors"
	"fmt"
	"sync"
	"time"

	"github.com/decred/dcrlnd/clock"
	"github.com/decred/dcrlnd/lnwallet/chainfee"
	"github.com/decred/dcrlnd/lnwire"
)

var (
	// ErrMailBoxShuttingDown is returned when the mailbox is interrupted by
	// a shutdown request.
	ErrMailBoxShuttingDown = errors.New("mailbox is shutting down")

	// ErrPacketAlreadyExists signals that an attempt to add a packet failed
	// because it already exists in the mailbox.
	ErrPacketAlreadyExists = errors.New("mailbox already has packet")
)

// MailBox is an interface which represents a concurrent-safe, in-order
// delivery queue for messages from the network and also from the main switch.
// This struct serves as a buffer between incoming messages, and messages to
// be handled by the link. Each of the mutating methods within this interface
// should be implemented in a non-blocking manner.
type MailBox interface {
	// AddMessage appends a new message to the end of the message queue.
	AddMessage(msg lnwire.Message) error

	// AddPacket appends a new message to the end of the packet queue.
	AddPacket(pkt *htlcPacket) error

	// HasPacket queries the packets for a circuit key; this is used to
	// drop packets bound for the switch that already have a queued
	// response.
	HasPacket(CircuitKey) bool

	// AckPacket removes a packet from the mailbox's in-memory replay
	// buffer. This will prevent a packet from being delivered after a link
	// restarts if the switch has remained online. The returned boolean
	// indicates whether or not a packet with the passed incoming circuit
	// key was removed.
	AckPacket(CircuitKey) bool

	// FailAdd fails an UpdateAddHTLC that exists within the mailbox,
	// removing it from the in-memory replay buffer. This will prevent the
	// packet from being delivered after the link restarts if the switch
	// has remained online. The generated LinkError will show an
	// OutgoingFailureDownstreamHtlcAdd FailureDetail.
	FailAdd(pkt *htlcPacket)

	// MessageOutBox returns a channel that any new messages ready for
	// delivery will be sent on.
	MessageOutBox() chan lnwire.Message

	// PacketOutBox returns a channel that any new packets ready for
	// delivery will be sent on.
	PacketOutBox() chan *htlcPacket

	// ResetMessages clears any pending wire messages from the inbox.
	ResetMessages() error

	// ResetPackets resets the packet head to point at the first element
	// in the list.
	ResetPackets() error

	// SetDustClosure takes in a closure that is used to evaluate whether
	// mailbox HTLCs are dust.
	SetDustClosure(isDust dustClosure)

	// SetFeeRate sets the feerate to be used when evaluating dust.
	SetFeeRate(feerate chainfee.AtomPerKByte)

	// DustPackets returns the dust sum for Adds in the mailbox for the
	// local and remote commitments.
	DustPackets() (lnwire.MilliAtom, lnwire.MilliAtom)

	// Start starts the mailbox and any goroutines it needs to operate
	// properly.
	Start()

	// Stop signals the mailbox and its goroutines for a graceful shutdown.
	Stop()
}
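
// drainMailBoxSketch is an illustrative sketch (not part of the upstream
// file) of how a link-like consumer might drain the two outboxes exposed by
// a MailBox from a single select loop. The quit channel is a hypothetical
// shutdown signal owned by the caller.
func drainMailBoxSketch(mb MailBox, quit <-chan struct{}) {
	for {
		select {
		// Wire messages ready to be processed by the link's message
		// handler.
		case msg := <-mb.MessageOutBox():
			_ = msg // handle the wire message

		// HTLC packets (Adds, Settles, Fails) ready to be applied to
		// the channel state machine.
		case pkt := <-mb.PacketOutBox():
			_ = pkt // handle the packet

		case <-quit:
			return
		}
	}
}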

type mailBoxConfig struct {
	// shortChanID is the short channel id of the channel this mailbox
	// belongs to.
	shortChanID lnwire.ShortChannelID

	// fetchUpdate retrieves the most recent channel update for the channel
	// this mailbox belongs to.
	fetchUpdate func(lnwire.ShortChannelID) (*lnwire.ChannelUpdate, error)

	// forwardPackets sends a variadic number of htlcPackets to the switch
	// to be routed. A quit channel should be provided so that the call can
	// properly exit during shutdown.
	forwardPackets func(chan struct{}, ...*htlcPacket) error

	// clock is a time source for the mailbox.
	clock clock.Clock

	// expiry is the interval after which Adds will be cancelled if they
	// have not yet been delivered. The computed deadline will expire this
	// long after the Adds are added via AddPacket.
	expiry time.Duration
}

// memoryMailBox is an implementation of the MailBox interface backed by
// purely in-memory queues.
type memoryMailBox struct {
	started sync.Once
	stopped sync.Once

	cfg *mailBoxConfig

	wireMessages *list.List
	wireMtx      sync.Mutex
	wireCond     *sync.Cond

	messageOutbox chan lnwire.Message
	msgReset      chan chan struct{}

	// repPkts is a queue for reply packets, e.g. Settles and Fails.
	repPkts  *list.List
	repIndex map[CircuitKey]*list.Element
	repHead  *list.Element

	// addPkts is a dedicated queue for Adds.
	addPkts  *list.List
	addIndex map[CircuitKey]*list.Element
	addHead  *list.Element

	pktMtx  sync.Mutex
	pktCond *sync.Cond

	pktOutbox chan *htlcPacket
	pktReset  chan chan struct{}

	wireShutdown chan struct{}
	pktShutdown  chan struct{}
	quit         chan struct{}

	// feeRate is set when the link receives or sends out fee updates. It
	// is refreshed when AttachMailBox is called in case a fee update did
	// not get committed. In some cases it may be out of sync with the
	// channel's feerate, but it should eventually get back in sync.
	feeRate chainfee.AtomPerKByte

	// isDust is set when AttachMailBox is called and serves to evaluate
	// the outstanding dust in the memoryMailBox given the currently set
	// feeRate.
	isDust dustClosure
}

// newMemoryMailBox creates a new instance of the memoryMailBox.
func newMemoryMailBox(cfg *mailBoxConfig) *memoryMailBox {
	box := &memoryMailBox{
		cfg:           cfg,
		wireMessages:  list.New(),
		repPkts:       list.New(),
		addPkts:       list.New(),
		messageOutbox: make(chan lnwire.Message),
		pktOutbox:     make(chan *htlcPacket),
		msgReset:      make(chan chan struct{}, 1),
		pktReset:      make(chan chan struct{}, 1),
		repIndex:      make(map[CircuitKey]*list.Element),
		addIndex:      make(map[CircuitKey]*list.Element),
		wireShutdown:  make(chan struct{}),
		pktShutdown:   make(chan struct{}),
		quit:          make(chan struct{}),
	}
	box.wireCond = sync.NewCond(&box.wireMtx)
	box.pktCond = sync.NewCond(&box.pktMtx)

	return box
}
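
// newMailBoxSketch is an illustrative sketch (not part of the upstream file)
// of how the switch wires up a mailbox. The fetchUpdate and forwardPackets
// closures are hypothetical stand-ins for the switch's real implementations,
// and clock.NewDefaultClock is assumed to be available as in lnd's clock
// package.
func newMailBoxSketch(sid lnwire.ShortChannelID) *memoryMailBox {
	box := newMemoryMailBox(&mailBoxConfig{
		shortChanID: sid,
		fetchUpdate: func(lnwire.ShortChannelID) (*lnwire.ChannelUpdate, error) {
			// Hypothetical: the switch would return the latest
			// gossiped update for this channel.
			return nil, errors.New("no update available")
		},
		forwardPackets: func(quit chan struct{}, pkts ...*htlcPacket) error {
			// Hypothetical: the switch would route these packets.
			return nil
		},
		clock:  clock.NewDefaultClock(),
		expiry: time.Minute,
	})

	// Start launches the wire and packet couriers.
	box.Start()

	return box
}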

// A compile time assertion to ensure that memoryMailBox meets the MailBox
// interface.
var _ MailBox = (*memoryMailBox)(nil)

// courierType is an enum that reflects the distinct types of messages a
// MailBox can handle. Each type will be placed in an isolated mail box and
// will have a dedicated goroutine for delivering the messages.
type courierType uint8

const (
	// wireCourier is a type of courier that handles wire messages.
	wireCourier courierType = iota

	// pktCourier is a type of courier that handles htlc packets.
	pktCourier
)

// Start starts the mailbox and any goroutines it needs to operate properly.
//
// NOTE: This method is part of the MailBox interface.
func (m *memoryMailBox) Start() {
	m.started.Do(func() {
		go m.mailCourier(wireCourier)
		go m.mailCourier(pktCourier)
	})
}

// ResetMessages blocks until all buffered wire messages are cleared.
func (m *memoryMailBox) ResetMessages() error {
	msgDone := make(chan struct{})
	select {
	case m.msgReset <- msgDone:
		return m.signalUntilReset(wireCourier, msgDone)
	case <-m.quit:
		return ErrMailBoxShuttingDown
	}
}

// ResetPackets blocks until the head of the packets buffer is reset, causing
// the packets to be redelivered in order.
func (m *memoryMailBox) ResetPackets() error {
	pktDone := make(chan struct{})
	select {
	case m.pktReset <- pktDone:
		return m.signalUntilReset(pktCourier, pktDone)
	case <-m.quit:
		return ErrMailBoxShuttingDown
	}
}

// signalUntilReset strobes the condition variable for the specified inbox
// type until receiving a response that the mailbox has processed a reset.
func (m *memoryMailBox) signalUntilReset(cType courierType,
	done chan struct{}) error {

	for {
		switch cType {
		case wireCourier:
			m.wireCond.Signal()
		case pktCourier:
			m.pktCond.Signal()
		}

		select {
		case <-time.After(time.Millisecond):
			continue
		case <-done:
			return nil
		case <-m.quit:
			return ErrMailBoxShuttingDown
		}
	}
}

// AckPacket removes the packet identified by its incoming circuit key from
// the queue of packets to be delivered. The returned boolean indicates
// whether or not a packet with the passed incoming circuit key was removed.
//
// NOTE: It is safe to call this method multiple times for the same circuit
// key.
func (m *memoryMailBox) AckPacket(inKey CircuitKey) bool {
	m.pktCond.L.Lock()
	defer m.pktCond.L.Unlock()

	if entry, ok := m.repIndex[inKey]; ok {
		// Check whether we are removing the head of the queue. If so,
		// we must advance the head to the next packet before removing.
		// It's possible that the courier has already advanced the
		// repHead, so this check prevents the repHead from getting
		// desynchronized.
		if entry == m.repHead {
			m.repHead = entry.Next()
		}
		m.repPkts.Remove(entry)
		delete(m.repIndex, inKey)

		return true
	}

	if entry, ok := m.addIndex[inKey]; ok {
		// Check whether we are removing the head of the queue. If so,
		// we must advance the head to the next add before removing.
		// It's possible that the courier has already advanced the
		// addHead, so this check prevents the addHead from getting
		// desynchronized.
		//
		// NOTE: While this event is rare for Settles or Fails, it
		// could be very common for Adds since the mailbox has the
		// ability to cancel Adds before they are delivered. When that
		// occurs, the head of addPkts has only been peeked and we
		// expect to be removing the head of the queue.
		if entry == m.addHead {
			m.addHead = entry.Next()
		}

		m.addPkts.Remove(entry)
		delete(m.addIndex, inKey)

		return true
	}

	return false
}
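
// replaySketch is an illustrative sketch (not part of the upstream file)
// showing the replay-buffer semantics: a packet stays queued (and visible to
// HasPacket for Settles/Fails) until AckPacket removes it, after which a
// link restart will no longer see it. The settle packet is a hypothetical
// Settle response held by the caller.
func replaySketch(mb MailBox, settle *htlcPacket) {
	// Queue the response; enqueueing a duplicate for the same incoming
	// circuit key returns ErrPacketAlreadyExists.
	if err := mb.AddPacket(settle); err != nil {
		return
	}

	// While queued, the switch can detect the pending response and drop
	// a redundant forward.
	_ = mb.HasPacket(settle.inKey())

	// Once the response has been committed, acking it removes it from the
	// in-memory replay buffer so it is not re-delivered after a link
	// restart.
	_ = mb.AckPacket(settle.inKey())
}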

// HasPacket queries the packets for a circuit key; this is used to drop
// packets bound for the switch that already have a queued response.
func (m *memoryMailBox) HasPacket(inKey CircuitKey) bool {
	m.pktCond.L.Lock()
	_, ok := m.repIndex[inKey]
	m.pktCond.L.Unlock()

	return ok
}

// Stop signals the mailbox and its goroutines for a graceful shutdown.
//
// NOTE: This method is part of the MailBox interface.
func (m *memoryMailBox) Stop() {
	m.stopped.Do(func() {
		close(m.quit)

		m.signalUntilShutdown(wireCourier)
		m.signalUntilShutdown(pktCourier)
	})
}

// signalUntilShutdown strobes the condition variable of the passed courier
// type, blocking until the worker has exited.
func (m *memoryMailBox) signalUntilShutdown(cType courierType) {
	var (
		cond     *sync.Cond
		shutdown chan struct{}
	)

	switch cType {
	case wireCourier:
		cond = m.wireCond
		shutdown = m.wireShutdown
	case pktCourier:
		cond = m.pktCond
		shutdown = m.pktShutdown
	}

	for {
		select {
		case <-time.After(time.Millisecond):
			cond.Signal()
		case <-shutdown:
			return
		}
	}
}

// pktWithExpiry wraps an incoming packet and records the time at which it
// should be canceled from the mailbox. This will be used to detect if it
// gets stuck in the mailbox and inform when to cancel back.
type pktWithExpiry struct {
	pkt    *htlcPacket
	expiry time.Time
}

func (p *pktWithExpiry) deadline(clock clock.Clock) <-chan time.Time {
	return clock.TickAfter(p.expiry.Sub(clock.Now()))
}
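
// expirySketch is an illustrative sketch (not part of the upstream file)
// showing how a queued Add is paired with its cancellation deadline: the
// returned channel fires once the clock passes the recorded expiry, which
// the courier below uses to fail Adds that were never delivered. The clock,
// packet, and expiry arguments are hypothetical.
func expirySketch(c clock.Clock, pkt *htlcPacket,
	expiry time.Duration) <-chan time.Time {

	p := &pktWithExpiry{
		pkt:    pkt,
		expiry: c.Now().Add(expiry),
	}

	// Ticks once the wall clock passes p.expiry.
	return p.deadline(c)
}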

// mailCourier is a dedicated goroutine whose job is to reliably deliver
// messages of a particular type. There are two types of couriers: wire
// couriers, and packet couriers. Depending on the passed courierType, this
// goroutine will assume one of two roles.
func (m *memoryMailBox) mailCourier(cType courierType) {
	switch cType {
	case wireCourier:
		defer close(m.wireShutdown)
	case pktCourier:
		defer close(m.pktShutdown)
	}

	// TODO(roasbeef): refactor...

	for {
		// First, we'll check our condition. If our target mailbox is
		// empty, then we'll wait until a new item is added.
		switch cType {
		case wireCourier:
			m.wireCond.L.Lock()
			for m.wireMessages.Front() == nil {
				m.wireCond.Wait()

				select {
				case msgDone := <-m.msgReset:
					m.wireMessages.Init()

					close(msgDone)
				case <-m.quit:
					m.wireCond.L.Unlock()
					return
				default:
				}
			}

		case pktCourier:
			m.pktCond.L.Lock()
			for m.repHead == nil && m.addHead == nil {
				m.pktCond.Wait()

				select {
				// Resetting the packet queue means just moving
				// our pointer to the front. This ensures that
				// any un-ACK'd messages are re-delivered upon
				// reconnect.
				case pktDone := <-m.pktReset:
					m.repHead = m.repPkts.Front()
					m.addHead = m.addPkts.Front()

					close(pktDone)

				case <-m.quit:
					m.pktCond.L.Unlock()
					return
				default:
				}
			}
		}

		var (
			nextRep   *htlcPacket
			nextRepEl *list.Element
			nextAdd   *pktWithExpiry
			nextAddEl *list.Element
			nextMsg   lnwire.Message
		)
		switch cType {
		// Grab the datum off the front of the queue, shifting the
		// slice's reference down one in order to remove the datum from
		// the queue.
		case wireCourier:
			entry := m.wireMessages.Front()
			nextMsg = m.wireMessages.Remove(entry).(lnwire.Message)

		// For packets, we actually never remove an item until it has
		// been ACK'd by the link. This ensures that if a packet that
		// has been read doesn't make it into a commitment, then it'll
		// be re-delivered once the link comes back online.
		case pktCourier:
			// Peek at the head of the Settle/Fails and Add queues.
			// We peek at both even if there is a Settle/Fail
			// present because we need to set a deadline for the
			// next pending Add if it's present. Due to clock
			// monotonicity, we know that the head of the Adds is
			// the next to expire.
			if m.repHead != nil {
				nextRep = m.repHead.Value.(*htlcPacket)
				nextRepEl = m.repHead
			}
			if m.addHead != nil {
				nextAdd = m.addHead.Value.(*pktWithExpiry)
				nextAddEl = m.addHead
			}
		}

		// Now that we're done with the condition, we can unlock it to
		// allow any callers to append to the end of our target queue.
		switch cType {
		case wireCourier:
			m.wireCond.L.Unlock()
		case pktCourier:
			m.pktCond.L.Unlock()
		}

		// With the next message obtained, we'll now select to attempt
		// to deliver the message. If we receive a kill signal, then
		// we'll bail out.
		switch cType {
		case wireCourier:
			select {
			case m.messageOutbox <- nextMsg:
			case msgDone := <-m.msgReset:
				m.wireCond.L.Lock()
				m.wireMessages.Init()
				m.wireCond.L.Unlock()

				close(msgDone)
			case <-m.quit:
				return
			}

		case pktCourier:
			var (
				pktOutbox chan *htlcPacket
				addOutbox chan *htlcPacket
				add       *htlcPacket
				deadline  <-chan time.Time
			)

			// Prioritize delivery of Settle/Fail packets over
			// Adds. This ensures that we actively clear the
			// commitment of existing HTLCs before trying to add
			// new ones. This can help to improve forwarding
			// performance since the time to sign a commitment is
			// linear in the number of HTLCs manifested on the
			// commitments.
			//
			// NOTE: Both types are eventually delivered over the
			// same channel, but we can control which is delivered
			// by exclusively making one nil and the other non-nil.
			// We know from our loop condition that at least one of
			// nextRep and nextAdd is non-nil.
			if nextRep != nil {
				pktOutbox = m.pktOutbox
			} else {
				addOutbox = m.pktOutbox
			}

			// If we have a pending Add, we'll also construct the
			// deadline so we can fail it back if we are unable to
			// deliver any message in time. We also dereference the
			// nextAdd's packet, since we will need access to it in
			// the case we are delivering it and/or if the deadline
			// expires.
			//
			// NOTE: It's possible after this point for add to be
			// nil, but this can only occur when addOutbox is also
			// nil, hence we won't accidentally deliver a nil
			// packet.
			if nextAdd != nil {
				add = nextAdd.pkt
				deadline = nextAdd.deadline(m.cfg.clock)
			}

			select {
			case pktOutbox <- nextRep:
				m.pktCond.L.Lock()
				// Only advance the repHead if this Settle or
				// Fail is still at the head of the queue.
				if m.repHead != nil && m.repHead == nextRepEl {
					m.repHead = m.repHead.Next()
				}
				m.pktCond.L.Unlock()

			case addOutbox <- add:
				m.pktCond.L.Lock()
				// Only advance the addHead if this Add is
				// still at the head of the queue.
				if m.addHead != nil && m.addHead == nextAddEl {
					m.addHead = m.addHead.Next()
				}
				m.pktCond.L.Unlock()

			case <-deadline:
				m.FailAdd(add)

			case pktDone := <-m.pktReset:
				m.pktCond.L.Lock()
				m.repHead = m.repPkts.Front()
				m.addHead = m.addPkts.Front()
				m.pktCond.L.Unlock()

				close(pktDone)

			case <-m.quit:
				return
			}
		}
	}
}
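
// prioritySelectSketch is an illustrative sketch (not part of the upstream
// file) of the nil-channel trick used by the packet courier above: both
// queues drain into the same outbox, but only one of the two local channel
// variables is non-nil, so the select can only fire for the prioritized
// packet. The arguments are hypothetical.
func prioritySelectSketch(outbox chan *htlcPacket, rep, add *htlcPacket,
	quit <-chan struct{}) {

	var repOutbox, addOutbox chan *htlcPacket

	// Sending on a nil channel blocks forever, so leaving one side nil
	// effectively disables that case of the select.
	if rep != nil {
		repOutbox = outbox
	} else {
		addOutbox = outbox
	}

	select {
	case repOutbox <- rep:
		// A Settle/Fail was delivered.
	case addOutbox <- add:
		// An Add was delivered.
	case <-quit:
	}
}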

// AddMessage appends a new message to the end of the message queue.
//
// NOTE: This method is safe for concurrent use and part of the MailBox
// interface.
func (m *memoryMailBox) AddMessage(msg lnwire.Message) error {
	// First, we'll lock the condition, and add the message to the end of
	// the wire message inbox.
	m.wireCond.L.Lock()
	m.wireMessages.PushBack(msg)
	m.wireCond.L.Unlock()

	// With the message added, we signal to the mailCourier that there are
	// additional messages to deliver.
	m.wireCond.Signal()

	return nil
}

// AddPacket appends a new message to the end of the packet queue.
//
// NOTE: This method is safe for concurrent use and part of the MailBox
// interface.
func (m *memoryMailBox) AddPacket(pkt *htlcPacket) error {
	m.pktCond.L.Lock()
	switch htlc := pkt.htlc.(type) {

	// Split off Settle/Fail packets into the repPkts queue.
	case *lnwire.UpdateFulfillHTLC, *lnwire.UpdateFailHTLC:
		if _, ok := m.repIndex[pkt.inKey()]; ok {
			m.pktCond.L.Unlock()
			return ErrPacketAlreadyExists
		}

		entry := m.repPkts.PushBack(pkt)
		m.repIndex[pkt.inKey()] = entry
		if m.repHead == nil {
			m.repHead = entry
		}

	// Split off Add packets into the addPkts queue.
	case *lnwire.UpdateAddHTLC:
		if _, ok := m.addIndex[pkt.inKey()]; ok {
			m.pktCond.L.Unlock()
			return ErrPacketAlreadyExists
		}

		entry := m.addPkts.PushBack(&pktWithExpiry{
			pkt:    pkt,
			expiry: m.cfg.clock.Now().Add(m.cfg.expiry),
		})
		m.addIndex[pkt.inKey()] = entry
		if m.addHead == nil {
			m.addHead = entry
		}

	default:
		m.pktCond.L.Unlock()
		return fmt.Errorf("unknown htlc type: %T", htlc)
	}
	m.pktCond.L.Unlock()

	// With the packet added, we signal to the mailCourier that there are
	// additional packets to consume.
	m.pktCond.Signal()

	return nil
}

// SetFeeRate sets the memoryMailBox's feerate for use in DustPackets.
func (m *memoryMailBox) SetFeeRate(feeRate chainfee.AtomPerKByte) {
	m.pktCond.L.Lock()
	defer m.pktCond.L.Unlock()

	m.feeRate = feeRate
}

// SetDustClosure sets the memoryMailBox's dustClosure for use in DustPackets.
func (m *memoryMailBox) SetDustClosure(isDust dustClosure) {
	m.pktCond.L.Lock()
	defer m.pktCond.L.Unlock()

	m.isDust = isDust
}

// DustPackets returns the dust sum for add packets in the mailbox. The first
// return value is the local dust sum and the second is the remote dust sum.
// This will keep track of a given dust HTLC from the time it is added via
// AddPacket until it is removed via AckPacket.
func (m *memoryMailBox) DustPackets() (lnwire.MilliAtom,
	lnwire.MilliAtom) {

	m.pktCond.L.Lock()
	defer m.pktCond.L.Unlock()

	var (
		localDustSum  lnwire.MilliAtom
		remoteDustSum lnwire.MilliAtom
	)

	// Run through the map of HTLCs and determine the dust sum with calls
	// to the memoryMailBox's isDust closure. Note that all mailbox packets
	// are outgoing so the second argument to isDust will be false.
	for _, e := range m.addIndex {
		addPkt := e.Value.(*pktWithExpiry).pkt

		// Evaluate whether this HTLC is dust on the local commitment.
		if m.isDust(
			m.feeRate, false, true, addPkt.amount.ToAtoms(),
		) {
			localDustSum += addPkt.amount
		}

		// Evaluate whether this HTLC is dust on the remote commitment.
		if m.isDust(
			m.feeRate, false, false, addPkt.amount.ToAtoms(),
		) {
			remoteDustSum += addPkt.amount
		}
	}

	return localDustSum, remoteDustSum
}
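
// dustSketch is an illustrative sketch (not part of the upstream file) of
// how a link might refresh the mailbox's dust state when it attaches: the
// fee rate and dust closure are installed first, after which DustPackets
// reports the dust sums for the queued Adds. The feeRate value and isDust
// closure are hypothetical inputs from the caller.
func dustSketch(mb MailBox, feeRate chainfee.AtomPerKByte,
	isDust dustClosure) (lnwire.MilliAtom, lnwire.MilliAtom) {

	mb.SetFeeRate(feeRate)
	mb.SetDustClosure(isDust)

	// Local and remote dust sums for all Adds currently queued.
	return mb.DustPackets()
}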

// FailAdd fails an UpdateAddHTLC that exists within the mailbox, removing it
// from the in-memory replay buffer. This will prevent the packet from being
// delivered after the link restarts if the switch has remained online. The
// generated LinkError will show an OutgoingFailureDownstreamHtlcAdd
// FailureDetail.
func (m *memoryMailBox) FailAdd(pkt *htlcPacket) {
	// First, remove the packet from the mailbox. If we didn't find the
	// packet because it has already been acked, we'll exit early to avoid
	// sending a duplicate fail message through the switch.
	if !m.AckPacket(pkt.inKey()) {
		return
	}

	var (
		localFailure = false
		reason       lnwire.OpaqueReason
	)

	// Create a temporary channel failure which we will send back to our
	// peer if this is a forward, or report to the user if the failed
	// payment was locally initiated.
	var failure lnwire.FailureMessage
	update, err := m.cfg.fetchUpdate(m.cfg.shortChanID)
	if err != nil {
		failure = &lnwire.FailTemporaryNodeFailure{}
	} else {
		failure = lnwire.NewTemporaryChannelFailure(update)
	}

	// If the payment was locally initiated (which is indicated by a nil
	// obfuscator), we do not need to encrypt it back to the sender.
	if pkt.obfuscator == nil {
		var b bytes.Buffer
		err := lnwire.EncodeFailure(&b, failure, 0)
		if err != nil {
			log.Errorf("Unable to encode failure: %v", err)
			return
		}
		reason = lnwire.OpaqueReason(b.Bytes())
		localFailure = true
	} else {
		// If the packet is part of a forward (identified by a non-nil
		// obfuscator), we need to encrypt the error back to the
		// source.
		var err error
		reason, err = pkt.obfuscator.EncryptFirstHop(failure)
		if err != nil {
			log.Errorf("Unable to obfuscate error: %v", err)
			return
		}
	}

	// Create a link error containing the temporary channel failure and a
	// detail which indicates that we failed to add the htlc.
	linkError := NewDetailedLinkError(
		failure, OutgoingFailureDownstreamHtlcAdd,
	)

	failPkt := &htlcPacket{
		incomingChanID: pkt.incomingChanID,
		incomingHTLCID: pkt.incomingHTLCID,
		circuit:        pkt.circuit,
		sourceRef:      pkt.sourceRef,
		hasSource:      true,
		localFailure:   localFailure,
		linkFailure:    linkError,
		htlc: &lnwire.UpdateFailHTLC{
			Reason: reason,
		},
	}

	if err := m.cfg.forwardPackets(m.quit, failPkt); err != nil {
		log.Errorf("Unhandled error while reforwarding packets "+
			"settle/fail over htlcswitch: %v", err)
	}
}

// MessageOutBox returns a channel that any new messages ready for delivery
// will be sent on.
//
// NOTE: This method is part of the MailBox interface.
func (m *memoryMailBox) MessageOutBox() chan lnwire.Message {
	return m.messageOutbox
}

// PacketOutBox returns a channel that any new packets ready for delivery will
// be sent on.
//
// NOTE: This method is part of the MailBox interface.
func (m *memoryMailBox) PacketOutBox() chan *htlcPacket {
	return m.pktOutbox
}

// mailOrchestrator is responsible for coordinating the creation and lifecycle
// of mailboxes used within the switch. It supports the ability to create
// mailboxes, reassign their short channel ids, deliver htlc packets, and
// queue packets for mailboxes that have not been created due to a link's late
// registration.
type mailOrchestrator struct {
	mu sync.RWMutex

	cfg *mailOrchConfig

	// mailboxes caches exactly one mailbox for all known channels.
	mailboxes map[lnwire.ChannelID]MailBox

	// liveIndex maps a live short chan id to the primary mailbox key.
	// An entry is only added to the liveIndex map under two conditions:
	//  1. A link has a non-zero short channel id at time of AddLink.
	//  2. A link receives a non-zero short channel id via
	//     UpdateShortChanID.
	liveIndex map[lnwire.ShortChannelID]lnwire.ChannelID

	// TODO(conner): add another pair of indexes:
	//   chan_id -> short_chan_id
	//   short_chan_id -> mailbox
	// so that Deliver can look up the mailbox directly once live,
	// but still queryable by channel_id.

	// unclaimedPackets maps a live short chan id to a queue of packets if
	// no mailbox has been created.
	unclaimedPackets map[lnwire.ShortChannelID][]*htlcPacket
}

type mailOrchConfig struct {
	// forwardPackets sends a variadic number of htlcPackets to the switch
	// to be routed. A quit channel should be provided so that the call can
	// properly exit during shutdown.
	forwardPackets func(chan struct{}, ...*htlcPacket) error

	// fetchUpdate retrieves the most recent channel update for the channel
	// this mailbox belongs to.
	fetchUpdate func(lnwire.ShortChannelID) (*lnwire.ChannelUpdate, error)

	// clock is a time source for the generated mailboxes.
	clock clock.Clock

	// expiry is the interval after which Adds will be cancelled if they
	// have not yet been delivered. The computed deadline will expire this
	// long after the Adds are added to a mailbox via AddPacket.
	expiry time.Duration
}

// newMailOrchestrator initializes a fresh mailOrchestrator.
func newMailOrchestrator(cfg *mailOrchConfig) *mailOrchestrator {
	return &mailOrchestrator{
		cfg:              cfg,
		mailboxes:        make(map[lnwire.ChannelID]MailBox),
		liveIndex:        make(map[lnwire.ShortChannelID]lnwire.ChannelID),
		unclaimedPackets: make(map[lnwire.ShortChannelID][]*htlcPacket),
	}
}

// Stop instructs the orchestrator to stop all active mailboxes.
func (mo *mailOrchestrator) Stop() {
	for _, mailbox := range mo.mailboxes {
		mailbox.Stop()
	}
}
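
// orchestratorSketch is an illustrative sketch (not part of the upstream
// file) of the orchestrator's lifecycle for a single channel: a mailbox is
// fetched or created for the channel, its short channel id is bound once
// confirmed, and packets addressed to that short channel id are then
// delivered directly. The identifiers and packet are hypothetical.
func orchestratorSketch(mo *mailOrchestrator, cid lnwire.ChannelID,
	sid lnwire.ShortChannelID, pkt *htlcPacket) error {

	// Fetch or lazily create the mailbox keyed by channel id.
	mailbox := mo.GetOrCreateMailBox(cid, sid)

	// Once the short channel id is known, bind it so Deliver can route by
	// short channel id; any packets queued as unclaimed for this sid are
	// flushed into the mailbox here.
	mo.BindLiveShortChanID(mailbox, cid, sid)

	// Packets for a bound sid go straight to the mailbox; otherwise they
	// would be held as unclaimed until the next BindLiveShortChanID.
	return mo.Deliver(sid, pkt)
}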

// GetOrCreateMailBox returns an existing mailbox belonging to `chanID`, or
// creates and returns a new mailbox if none is found.
func (mo *mailOrchestrator) GetOrCreateMailBox(chanID lnwire.ChannelID,
	shortChanID lnwire.ShortChannelID) MailBox {

	// First, try to look up the mailbox directly using only the shared
	// mutex.
	mo.mu.RLock()
	mailbox, ok := mo.mailboxes[chanID]
	if ok {
		mo.mu.RUnlock()
		return mailbox
	}
	mo.mu.RUnlock()

	// Otherwise, we will try again with the exclusive lock, creating a
	// mailbox if one still has not been created.
	mo.mu.Lock()
	mailbox = mo.exclusiveGetOrCreateMailBox(chanID, shortChanID)
	mo.mu.Unlock()

	return mailbox
}

// exclusiveGetOrCreateMailBox checks for the existence of a mailbox for the
// given channel id. If none is found, a new one is created, started, and
// recorded.
//
// NOTE: This method MUST be invoked with the mailOrchestrator's exclusive
// lock.
func (mo *mailOrchestrator) exclusiveGetOrCreateMailBox(
	chanID lnwire.ChannelID, shortChanID lnwire.ShortChannelID) MailBox {

	mailbox, ok := mo.mailboxes[chanID]
	if !ok {
		mailbox = newMemoryMailBox(&mailBoxConfig{
			shortChanID:    shortChanID,
			fetchUpdate:    mo.cfg.fetchUpdate,
			forwardPackets: mo.cfg.forwardPackets,
			clock:          mo.cfg.clock,
			expiry:         mo.cfg.expiry,
		})
		mailbox.Start()
		mo.mailboxes[chanID] = mailbox
	}

	return mailbox
}

// BindLiveShortChanID registers that messages bound for a particular short
// channel id should be forwarded to the mailbox corresponding to the given
// channel id. This method also checks to see if there are any unclaimed
// packets for this short_chan_id. If any are found, they are delivered to the
// mailbox and removed (marked as claimed).
func (mo *mailOrchestrator) BindLiveShortChanID(mailbox MailBox,
	cid lnwire.ChannelID, sid lnwire.ShortChannelID) {

	mo.mu.Lock()
	// Update the mapping from short channel id to the mailbox's channel
	// id.
	mo.liveIndex[sid] = cid

	// Retrieve any unclaimed packets destined for this mailbox.
	pkts := mo.unclaimedPackets[sid]
	delete(mo.unclaimedPackets, sid)
	mo.mu.Unlock()

	// Deliver the unclaimed packets.
	for _, pkt := range pkts {
		mailbox.AddPacket(pkt)
	}
}

// Deliver looks up the target mailbox using the live index from
// short_chan_id to channel_id. If the mailbox is found, the message is
// delivered directly. Otherwise the packet is recorded as unclaimed, and will
// be delivered to the mailbox upon the subsequent call to
// BindLiveShortChanID.
func (mo *mailOrchestrator) Deliver(
	sid lnwire.ShortChannelID, pkt *htlcPacket) error {

	var (
		mailbox MailBox
		found   bool
	)

	// First, try to find the channel id for the target short_chan_id. If
	// the link is live, we will also look up the created mailbox.
	mo.mu.RLock()
	chanID, isLive := mo.liveIndex[sid]
	if isLive {
		mailbox, found = mo.mailboxes[chanID]
	}
	mo.mu.RUnlock()

	// The link is live and the target mailbox was found, deliver
	// immediately.
	if isLive && found {
		return mailbox.AddPacket(pkt)
	}

	// If we detected that the link has not been made live, we will acquire
	// the exclusive lock preemptively in order to queue this packet in the
	// list of unclaimed packets.
	mo.mu.Lock()

	// Double check to see if the mailbox has not been made live since the
	// release of the shared lock.
	//
	// NOTE: Checking again with the exclusive lock held prevents a race
	// condition where BindLiveShortChanID is interleaved between the
	// release of the shared lock, and acquiring the exclusive lock. The
	// result would be stuck packets, as they wouldn't be redelivered until
	// the next call to BindLiveShortChanID, which is expected to occur
	// infrequently.
	chanID, isLive = mo.liveIndex[sid]
	if isLive {
		// Reaching this point indicates the mailbox is actually live.
		// We'll try to load the mailbox using the fresh channel id.
		//
		// NOTE: This should never create a new mailbox, as the live
		// index should only be set if the mailbox had been initialized
		// beforehand. However, this does ensure that this case is
		// handled properly in the event that it could happen.
		mailbox = mo.exclusiveGetOrCreateMailBox(chanID, sid)
		mo.mu.Unlock()

		// Deliver the packet to the mailbox if it was found or
		// created.
		return mailbox.AddPacket(pkt)
	}

	// Finally, if the channel id is still not found in the live index,
	// we'll add this to the list of unclaimed packets. These will be
	// delivered upon the next call to BindLiveShortChanID.
	mo.unclaimedPackets[sid] = append(mo.unclaimedPackets[sid], pkt)
	mo.mu.Unlock()

	return nil
}