github.com/vanus-labs/vanus/lib@v0.0.0-20231221070800-1334a7b9605e/container/conque/blocking/queue.go (about)

     1  // Copyright 2023 Linkall Inc.
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  package blocking
    16  
    17  import (
    18  	// standard libraries.
    19  	stdsync "sync"
    20  	"sync/atomic"
    21  
    22  	// this project.
    23  	"github.com/vanus-labs/vanus/lib/container/conque/unbounded"
    24  	"github.com/vanus-labs/vanus/lib/sync"
    25  )
    26  
// Queue is a closable blocking queue: pushes never block, while pops block
// until an item is available or the queue is closed. It pairs an unbounded
// non-blocking queue (storage) with a counting semaphore (item count /
// popper wakeup).
type Queue[T any] struct {
	q     unbounded.Queue[T] // underlying non-blocking storage
	sem   sync.Semaphore     // counts pushed-but-unpopped items; Close releases it to wake poppers
	mu    stdsync.RWMutex    // Push holds the read lock; Wait takes the write lock to flush in-flight Pushes
	state int32              // 0 while open, 1 after Close; read/written atomically
}
    33  
    34  func New[T any](handoff bool) *Queue[T] {
    35  	return new(Queue[T]).Init(handoff)
    36  }
    37  
// Init initializes the queue's semaphore and returns q for chaining.
// handoff configures the semaphore's wakeup behavior (passed through to
// sync.Semaphore.Init; presumably direct handoff to a waiter — confirm
// against the vanus sync package).
func (q *Queue[T]) Init(handoff bool) *Queue[T] {
	q.sem.Init(handoff)
	return q
}
    42  
// Close marks the queue closed, then releases the semaphore once to wake a
// blocked popper. Each popper that observes the closed state re-releases the
// semaphore before returning (see SharedPop/UniquePop), so the single
// release cascades to every waiter.
func (q *Queue[T]) Close() {
	atomic.StoreInt32(&q.state, 1)
	q.sem.Release()
}
    47  
    48  // Wait ensures that all incoming Pushes observe that the queue is closed.
    49  func (q *Queue[T]) Wait() {
    50  	// Make sure no inflight Push.
    51  	q.mu.Lock()
    52  
    53  	// no op
    54  	_ = 1
    55  
    56  	q.mu.Unlock()
    57  }
    58  
// Push enqueues v and wakes one popper. It never blocks. It returns false
// (dropping v) if the queue has been closed.
//
// The read lock is held for the whole operation so that Wait, which takes
// the write lock, cannot return while a Push that might have missed the
// closed-state check is still in flight.
func (q *Queue[T]) Push(v T) bool {
	// NOTE: no panic, avoid unlocking with defer.
	q.mu.RLock()

	// TODO: maybe atomic is unnecessary.
	if atomic.LoadInt32(&q.state) != 0 {
		q.mu.RUnlock()
		return false
	}

	// Enqueue first, then release the semaphore: a popper woken by the
	// release is guaranteed to (eventually) find an item in the queue.
	_ = q.q.Push(v)
	q.sem.Release()
	q.mu.RUnlock()
	return true
}
    74  
// SharedPop blocks until an item is available and dequeues it. It returns
// (zero, false) if the queue has been closed; items still queued at close
// time are not drained.
//
// Safe for use by multiple concurrent poppers (it uses the underlying
// queue's shared pop).
func (q *Queue[T]) SharedPop() (T, bool) {
	q.sem.Acquire()

	// Check close.
	if atomic.LoadInt32(&q.state) != 0 {
		// Re-release so the close signal propagates to the next waiter
		// (see Close).
		q.sem.Release()
		var v T
		return v, false
	}

	// A successful Acquire means a Push has incremented the semaphore, but
	// its enqueue may not yet be visible (Push enqueues before releasing,
	// yet another popper may race us to that item while its own item is
	// still in flight) — spin until a pop succeeds.
	for {
		v, ok := q.q.SharedPop()
		if ok {
			return v, true
		}
	}
}
    92  
// UniquePop blocks until an item is available and dequeues it using the
// underlying queue's single-consumer pop. It returns (zero, false) if the
// queue has been closed; items still queued at close time are not drained.
//
// NOTE(review): presumably only one goroutine may call UniquePop at a time
// — confirm against unbounded.Queue's contract.
func (q *Queue[T]) UniquePop() (T, bool) {
	q.sem.Acquire()

	// Check close.
	if atomic.LoadInt32(&q.state) != 0 {
		// Re-release so the close signal propagates to the next waiter
		// (see Close).
		q.sem.Release()
		var v T
		return v, false
	}

	// The semaphore guarantees an item was pushed, but its enqueue may not
	// be visible yet — spin until the pop succeeds. The middle result of
	// UniquePop is intentionally discarded.
	for {
		v, _, ok := q.q.UniquePop()
		if ok {
			return v, true
		}
	}
}
   110  
   111  func (q *Queue[T]) RawPop() (T, bool) {
   112  	v, _, ok := q.q.UniquePop()
   113  	return v, ok
   114  }