github.com/decred/dcrlnd@v0.7.6/htlcswitch/queue.go

package htlcswitch

import (
	"sync"
	"sync/atomic"
	"time"

	"github.com/decred/dcrlnd/lnwire"
)

// packetQueue is a goroutine-safe queue of htlc packets which overflow the
// current commitment transaction. An HTLC will overflow the current
// commitment transaction if one attempts to add a new HTLC to the state
// machine which already has the max number of pending HTLCs present on the
// commitment transaction. Packets are removed from the queue by the
// channelLink itself as additional slots become available on the commitment
// transaction. In order to synchronize properly, we use a semaphore to allow
// the channelLink to signal the number of slots available, and a condition
// variable to allow the packetQueue to know when new items have been added
// to the queue.
type packetQueue struct {
	// totalHtlcAmt is the sum of the value of all pending HTLCs currently
	// residing within the overflow queue. This value should only be read
	// or modified *atomically*.
	totalHtlcAmt int64 // To be used atomically.

	// queueLen is an internal counter that reflects the size of the queue
	// at any given instant. This value is intended to be used atomically,
	// as it is read by internal methods to obtain the length of the queue
	// without grabbing the main lock. This allows callers to avoid a
	// deadlock situation where the main goroutine is attempting a send
	// with the lock held.
	queueLen int32 // To be used atomically.

	streamShutdown int32 // To be used atomically.

	queue []*htlcPacket

	wg sync.WaitGroup

	// freeSlots serves as a semaphore whose current value signals the
	// number of available slots on the commitment transaction.
	freeSlots chan struct{}

	queueCond *sync.Cond
	queueMtx  sync.Mutex

	// outgoingPkts is a channel that the channelLink will receive on in
	// order to drain the packetQueue as new slots become available on the
	// commitment transaction.
	outgoingPkts chan *htlcPacket

	quit chan struct{}
}

// newPacketQueue returns a new instance of the packetQueue. The maxFreeSlots
// value should reflect the max number of HTLCs that we're allowed to have
// outstanding within the commitment transaction.
func newPacketQueue(maxFreeSlots int) *packetQueue {
	p := &packetQueue{
		outgoingPkts: make(chan *htlcPacket),
		freeSlots:    make(chan struct{}, maxFreeSlots),
		quit:         make(chan struct{}),
	}
	p.queueCond = sync.NewCond(&p.queueMtx)

	return p
}
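
// examplePacketQueueLifecycle is a hypothetical sketch, not part of the
// original file, illustrating the intended wiring of the queue: it is sized
// to the commitment's HTLC limit, started, fed an overflow packet, and
// drained via outgoingPkts once the consumer signals a freed slot back. The
// slot count below is an arbitrary value chosen for illustration only.
func examplePacketQueueLifecycle(pkt *htlcPacket) {
	// Assume the commitment transaction allows at most 10 pending HTLCs.
	q := newPacketQueue(10)
	q.Start()
	defer q.Stop()

	// An HTLC that couldn't fit on the commitment transaction is parked
	// in the overflow queue.
	q.AddPkt(pkt)

	// Once the link frees a commitment slot, it signals the queue, and
	// the packetCoordinator responds by forwarding the queued packet.
	q.SignalFreeSlot()
	fwdPkt := <-q.outgoingPkts
	_ = fwdPkt
}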

// Start starts all goroutines that the packetQueue needs to perform its
// normal duties.
func (p *packetQueue) Start() {
	p.wg.Add(1)
	go p.packetCoordinator()
}

// Stop signals the packetQueue for a graceful shutdown, and waits for all
// goroutines to exit.
func (p *packetQueue) Stop() {
	close(p.quit)

	// Now that we've closed the channel, we'll repeatedly signal the
	// packetCoordinator until we've detected that it has exited.
	for atomic.LoadInt32(&p.streamShutdown) == 0 {
		p.queueCond.Signal()
		time.Sleep(time.Millisecond * 100)
	}
}

// packetCoordinator is a goroutine that handles the packet overflow queue.
// Using a synchronized queue, outside callers are able to append to the end of
// the queue, waking up the coordinator when the queue transitions from empty
// to non-empty. The packetCoordinator will then aggressively try to empty out
// the queue, passing new htlcPackets to the channelLink as slots within the
// commitment transaction become available.
//
// Future iterations of the packetCoordinator will implement congestion
// avoidance logic in the face of persistent htlcPacket back-pressure.
//
// TODO(roasbeef): later will need to add back-pressure handling heuristics
// like regular congestion avoidance:
//   - random dropping, RED, etc
func (p *packetQueue) packetCoordinator() {
	// Release the WaitGroup counter bumped in Start once we exit.
	defer p.wg.Done()
	defer atomic.StoreInt32(&p.streamShutdown, 1)

	for {
		// First, we'll check our condition. If the queue of packets is
		// empty, then we'll wait until a new item is added.
		p.queueCond.L.Lock()
		for len(p.queue) == 0 {
			p.queueCond.Wait()

			// If we were woken up in order to exit, then we'll do
			// so. Otherwise, we'll check the packet queue for any
			// new items.
			select {
			case <-p.quit:
				p.queueCond.L.Unlock()
				return
			default:
			}
		}

		nextPkt := p.queue[0]

		p.queueCond.L.Unlock()

		// With the next packet in hand, we'll attempt to claim a free
		// slot on the commitment transaction. If one is available, we
		// hand the packet off to the link; otherwise, the default
		// case below lets us loop back to re-examine the queue and
		// the quit channel.
		select {
		case <-p.freeSlots:

			select {
			case p.outgoingPkts <- nextPkt:
				// Pop the item off the front of the queue,
				// nil-ing out its slot so the packet can be
				// GC'd, and slide the head pointer forward to
				// set up the next iteration. If the queue is
				// empty at this point, then we'll block at
				// the top.
				p.queueCond.L.Lock()
				p.queue[0] = nil
				p.queue = p.queue[1:]
				atomic.AddInt32(&p.queueLen, -1)
				atomic.AddInt64(&p.totalHtlcAmt, int64(-nextPkt.amount))
				p.queueCond.L.Unlock()
			case <-p.quit:
				return
			}

		case <-p.quit:
			return

		default:
		}
	}
}
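
// exampleDrainLoop is a hypothetical sketch, not part of the original file,
// of the consumer side that packetCoordinator is written against: the
// channelLink's forwarding loop selects on outgoingPkts to pick up overflow
// packets as the coordinator releases them, one per freed commitment slot.
func exampleDrainLoop(q *packetQueue, quit chan struct{}) {
	for {
		select {
		case pkt := <-q.outgoingPkts:
			// Hand the packet to the link's state machine; it now
			// occupies the commitment slot that was previously
			// signaled free via SignalFreeSlot.
			_ = pkt

		case <-quit:
			return
		}
	}
}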

// AddPkt adds the referenced packet to the overflow queue, preserving the
// ordering of the existing items.
func (p *packetQueue) AddPkt(pkt *htlcPacket) {
	// First, we'll lock the condition, add the packet to the end of the
	// queue, and increment the internal atomics tracking the queue's
	// length and total value.
	p.queueCond.L.Lock()
	p.queue = append(p.queue, pkt)
	atomic.AddInt32(&p.queueLen, 1)
	atomic.AddInt64(&p.totalHtlcAmt, int64(pkt.amount))
	p.queueCond.L.Unlock()

	// With the packet added, we signal to the packetCoordinator that
	// there are additional packets to consume.
	p.queueCond.Signal()
}

// SignalFreeSlot signals to the queue that a new slot has opened up within
// the commitment transaction. The maximum number of free slots is defined
// when initially creating the packetQueue itself. This method, combined with
// AddPkt, creates the following abstraction: a synchronized queue of
// effectively unbounded length which can be added to at will, and which
// flows onto a commitment transaction of fixed capacity.
func (p *packetQueue) SignalFreeSlot() {
	// We'll only send over a free slot signal if the queue *is not* empty.
	// Otherwise, it's possible that we attempt to overfill the free slots
	// semaphore and block indefinitely below.
	if atomic.LoadInt32(&p.queueLen) == 0 {
		return
	}

	select {
	case p.freeSlots <- struct{}{}:
	case <-p.quit:
		return
	}
}
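
// exampleSlotSignal is a hypothetical sketch, not part of the original file,
// showing when SignalFreeSlot is meant to be invoked: once per HTLC removed
// from the commitment transaction (e.g. after a settle or fail), so that the
// freeSlots semaphore mirrors the number of slots actually available.
func exampleSlotSignal(q *packetQueue, htlcsRemoved int) {
	for i := 0; i < htlcsRemoved; i++ {
		// Each call hands one slot token to the packetCoordinator,
		// allowing it to forward one queued packet. Calls are no-ops
		// while the overflow queue is empty.
		q.SignalFreeSlot()
	}
}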

// Length returns the number of pending htlc packets present within the
// overflow queue.
func (p *packetQueue) Length() int32 {
	return atomic.LoadInt32(&p.queueLen)
}

// TotalHtlcAmount is the total amount (in milli-atoms) of all HTLCs currently
// residing within the overflow queue.
func (p *packetQueue) TotalHtlcAmount() lnwire.MilliAtom {
	// TODO(roasbeef): also factor in fee rate?
	return lnwire.MilliAtom(atomic.LoadInt64(&p.totalHtlcAmt))
}
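
// exampleQueueStats is a hypothetical sketch, not part of the original file,
// showing that both accessors are safe to call from any goroutine, e.g. for
// logging or throttling heuristics, since they only read atomic counters and
// never acquire the queue's internal lock.
func exampleQueueStats(q *packetQueue) (int32, lnwire.MilliAtom) {
	return q.Length(), q.TotalHtlcAmount()
}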