github.com/nats-io/nats-server/v2@v2.11.0-preview.2/server/ipqueue.go (about)

     1  // Copyright 2021-2023 The NATS Authors
     2  // Licensed under the Apache License, Version 2.0 (the "License");
     3  // you may not use this file except in compliance with the License.
     4  // You may obtain a copy of the License at
     5  //
     6  // http://www.apache.org/licenses/LICENSE-2.0
     7  //
     8  // Unless required by applicable law or agreed to in writing, software
     9  // distributed under the License is distributed on an "AS IS" BASIS,
    10  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    11  // See the License for the specific language governing permissions and
    12  // limitations under the License.
    13  
    14  package server
    15  
    16  import (
    17  	"sync"
    18  	"sync/atomic"
    19  )
    20  
// ipQueueDefaultMaxRecycleSize is the default capacity limit above which
// a popped backing slice is NOT put back into the pool (see recycle()).
const ipQueueDefaultMaxRecycleSize = 4 * 1024
    22  
    23  // This is a generic intra-process queue.
// This is a generic intra-process queue.
type ipQueue[T any] struct {
	// Count of elements handed out by pop() and not yet accounted for by
	// recycle(). Accessed atomically; presumably kept first for 64-bit
	// alignment on 32-bit platforms — TODO confirm.
	inprogress int64
	sync.Mutex
	ch   chan struct{} // Capacity-1 notification channel: "queue may be non-empty".
	elts []T           // Backing slice; live elements are elts[pos:].
	pos  int           // Index of the first not-yet-consumed element in elts.
	pool *sync.Pool    // Recycled backing slices, stored as *[]T (see SA6002 note below).
	mrs  int           // Max recycle size: slices with larger capacity are not pooled.
	name string        // Registration key for this queue in m.
	m    *sync.Map     // The server's registry of ipQueues (s.ipQueues).
}
    35  
// ipQueueOpts holds the configurable settings applied by newIPQueue.
type ipQueueOpts struct {
	maxRecycleSize int // Capacity limit above which slices are not returned to the pool.
}
    39  
// ipQueueOpt is a functional option for configuring a queue in newIPQueue.
type ipQueueOpt func(*ipQueueOpts)
    41  
    42  // This option allows to set the maximum recycle size when attempting
    43  // to put back a slice to the pool.
    44  func ipQueue_MaxRecycleSize(max int) ipQueueOpt {
    45  	return func(o *ipQueueOpts) {
    46  		o.maxRecycleSize = max
    47  	}
    48  }
    49  
    50  func newIPQueue[T any](s *Server, name string, opts ...ipQueueOpt) *ipQueue[T] {
    51  	qo := ipQueueOpts{maxRecycleSize: ipQueueDefaultMaxRecycleSize}
    52  	for _, o := range opts {
    53  		o(&qo)
    54  	}
    55  	q := &ipQueue[T]{
    56  		ch:   make(chan struct{}, 1),
    57  		mrs:  qo.maxRecycleSize,
    58  		pool: &sync.Pool{},
    59  		name: name,
    60  		m:    &s.ipQueues,
    61  	}
    62  	s.ipQueues.Store(name, q)
    63  	return q
    64  }
    65  
    66  // Add the element `e` to the queue, notifying the queue channel's `ch` if the
    67  // entry is the first to be added, and returns the length of the queue after
    68  // this element is added.
    69  func (q *ipQueue[T]) push(e T) int {
    70  	var signal bool
    71  	q.Lock()
    72  	l := len(q.elts) - q.pos
    73  	if l == 0 {
    74  		signal = true
    75  		eltsi := q.pool.Get()
    76  		if eltsi != nil {
    77  			// Reason we use pointer to slice instead of slice is explained
    78  			// here: https://staticcheck.io/docs/checks#SA6002
    79  			q.elts = (*(eltsi.(*[]T)))[:0]
    80  		}
    81  		if cap(q.elts) == 0 {
    82  			q.elts = make([]T, 0, 32)
    83  		}
    84  	}
    85  	q.elts = append(q.elts, e)
    86  	l++
    87  	q.Unlock()
    88  	if signal {
    89  		select {
    90  		case q.ch <- struct{}{}:
    91  		default:
    92  		}
    93  	}
    94  	return l
    95  }
    96  
    97  // Returns the whole list of elements currently present in the queue,
    98  // emptying the queue. This should be called after receiving a notification
    99  // from the queue's `ch` notification channel that indicates that there
   100  // is something in the queue.
   101  // However, in cases where `drain()` may be called from another go
   102  // routine, it is possible that a routine is notified that there is
   103  // something, but by the time it calls `pop()`, the drain() would have
   104  // emptied the queue. So the caller should never assume that pop() will
   105  // return a slice of 1 or more, it could return `nil`.
   106  func (q *ipQueue[T]) pop() []T {
   107  	if q == nil {
   108  		return nil
   109  	}
   110  	var elts []T
   111  	q.Lock()
   112  	if q.pos == 0 {
   113  		elts = q.elts
   114  	} else {
   115  		elts = q.elts[q.pos:]
   116  	}
   117  	q.elts, q.pos = nil, 0
   118  	atomic.AddInt64(&q.inprogress, int64(len(elts)))
   119  	q.Unlock()
   120  	return elts
   121  }
   122  
// resetAndReturnToPool truncates the slice to zero length (keeping its
// capacity) and puts it back into the pool for reuse by a later push().
// Note: callers are responsible for any max-recycle-size check; this
// helper pools unconditionally.
func (q *ipQueue[T]) resetAndReturnToPool(elts *[]T) {
	(*elts) = (*elts)[:0]
	q.pool.Put(elts)
}
   127  
   128  // Returns the first element from the queue, if any. See comment above
   129  // regarding calling after being notified that there is something and
   130  // the use of drain(). In short, the caller should always check the
   131  // boolean return value to ensure that the value is genuine and not a
   132  // default empty value.
   133  func (q *ipQueue[T]) popOne() (T, bool) {
   134  	q.Lock()
   135  	l := len(q.elts) - q.pos
   136  	if l < 1 {
   137  		q.Unlock()
   138  		var empty T
   139  		return empty, false
   140  	}
   141  	e := q.elts[q.pos]
   142  	q.pos++
   143  	l--
   144  	if l > 0 {
   145  		// We need to re-signal
   146  		select {
   147  		case q.ch <- struct{}{}:
   148  		default:
   149  		}
   150  	} else {
   151  		// We have just emptied the queue, so we can recycle now.
   152  		q.resetAndReturnToPool(&q.elts)
   153  		q.elts, q.pos = nil, 0
   154  	}
   155  	q.Unlock()
   156  	return e, true
   157  }
   158  
   159  // After a pop(), the slice can be recycled for the next push() when
   160  // a first element is added to the queue.
   161  // This will also decrement the "in progress" count with the length
   162  // of the slice.
   163  // Reason we use pointer to slice instead of slice is explained
   164  // here: https://staticcheck.io/docs/checks#SA6002
   165  func (q *ipQueue[T]) recycle(elts *[]T) {
   166  	// If invoked with a nil list, nothing to do.
   167  	if elts == nil || *elts == nil {
   168  		return
   169  	}
   170  	// Update the in progress count.
   171  	if len(*elts) > 0 {
   172  		if atomic.AddInt64(&q.inprogress, int64(-(len(*elts)))) < 0 {
   173  			atomic.StoreInt64(&q.inprogress, 0)
   174  		}
   175  	}
   176  	// We also don't want to recycle huge slices, so check against the max.
   177  	// q.mrs is normally immutable but can be changed, in a safe way, in some tests.
   178  	if cap(*elts) > q.mrs {
   179  		return
   180  	}
   181  	q.resetAndReturnToPool(elts)
   182  }
   183  
   184  // Returns the current length of the queue.
   185  func (q *ipQueue[T]) len() int {
   186  	q.Lock()
   187  	l := len(q.elts) - q.pos
   188  	q.Unlock()
   189  	return l
   190  }
   191  
   192  // Empty the queue and consumes the notification signal if present.
   193  // Note that this could cause a reader go routine that has been
   194  // notified that there is something in the queue (reading from queue's `ch`)
   195  // may then get nothing if `drain()` is invoked before the `pop()` or `popOne()`.
   196  func (q *ipQueue[T]) drain() {
   197  	if q == nil {
   198  		return
   199  	}
   200  	q.Lock()
   201  	if q.elts != nil {
   202  		q.resetAndReturnToPool(&q.elts)
   203  		q.elts, q.pos = nil, 0
   204  	}
   205  	// Consume the signal if it was present to reduce the chance of a reader
   206  	// routine to be think that there is something in the queue...
   207  	select {
   208  	case <-q.ch:
   209  	default:
   210  	}
   211  	q.Unlock()
   212  }
   213  
// Since the length of the queue goes to 0 after a pop(), it is good to
// have an insight on how many elements are yet to be processed after a pop().
// For that reason, the queue maintains a count of elements returned through
// the pop() API. When the caller will call q.recycle(), this count will
// be reduced by the size of the slice returned by pop().
// Safe to call concurrently; the counter is read atomically.
func (q *ipQueue[T]) inProgress() int64 {
	return atomic.LoadInt64(&q.inprogress)
}
   222  
   223  // Remove this queue from the server's map of ipQueues.
   224  // All ipQueue operations (such as push/pop/etc..) are still possible.
   225  func (q *ipQueue[T]) unregister() {
   226  	if q == nil {
   227  		return
   228  	}
   229  	q.m.Delete(q.name)
   230  }