github.com/songzhibin97/go-baseutils@v0.0.2-0.20240302024150-487d8ce9c082/structure/queues/lscq/point.go

package lscq

import (
	"sync"
	"sync/atomic"
	"unsafe"
)

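// pointerSCQPool recycles pointerSCQ ring segments used by the enqueue slow
// path, so growing the linked list of segments does not always allocate.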
var pointerSCQPool = sync.Pool{
	New: func() interface{} {
		return newPointerSCQ()
	},
}

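// PointerQueue is a concurrent FIFO queue of unsafe.Pointer values, built as a
// linked list of bounded SCQ ring segments. head and tail point to the segments
// used for dequeue and enqueue respectively; the padding keeps them on separate
// cache lines to avoid false sharing.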
type PointerQueue struct {
	head *pointerSCQ
	_    [lscqcacheLineSize - unsafe.Sizeof(new(uintptr))]byte
	tail *pointerSCQ
}

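// NewPointer returns an empty PointerQueue backed by a single SCQ segment.
//
// Illustrative usage (a sketch; callers typically wrap the unsafe.Pointer API
// with their own typed helpers):
//
//	q := NewPointer()
//	v := 42
//	q.Enqueue(unsafe.Pointer(&v))
//	if p, ok := q.Dequeue(); ok {
//		_ = *(*int)(p)
//	}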
func NewPointer() *PointerQueue {
	q := newPointerSCQ()
	return &PointerQueue{head: q, tail: q}
}

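// Dequeue removes and returns the oldest pointer in the queue. It returns
// ok == false when the queue is empty. When the head segment is drained and a
// next segment exists, head is advanced to that segment.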
func (q *PointerQueue) Dequeue() (data unsafe.Pointer, ok bool) {
	for {
		cq := (*pointerSCQ)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&q.head))))
		data, ok = cq.Dequeue()
		if ok {
			return
		}
		// cq does not have enough entries.
		nex := atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&cq.next)))
		if nex == nil {
			// We don't have a next SCQ.
			return
		}
		// cq.next is not nil, so subsequent entries will be inserted into
		// cq.next instead of cq. Restore the threshold and retry once more;
		// if cq is really empty we can retire it and advance head to cq.next.
		atomic.StoreInt64(&cq.threshold, int64(scqsize*2)-1)
		data, ok = cq.Dequeue()
		if ok {
			return
		}
		if atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(&q.head)), (unsafe.Pointer(cq)), nex) {
			// We can't ensure that no other goroutines will access cq
			// (it may still be held by a previous dequeuer), so cq is
			// dropped rather than returned to the pool.
			cq = nil
		}
	}
}

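// Enqueue appends data to the queue. If the tail segment is full it is closed
// and a fresh segment from pointerSCQPool is linked in, so the call always
// succeeds and returns true.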
func (q *PointerQueue) Enqueue(data unsafe.Pointer) bool {
	for {
		cq := (*pointerSCQ)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&q.tail))))
		nex := atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&cq.next)))
		if nex != nil {
			// Help move cq.next into tail.
			atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(&q.tail)), (unsafe.Pointer(cq)), nex)
			continue
		}
		if cq.Enqueue(data) {
			return true
		}
		// The current cq is full.
		atomicTestAndSetFirstBit(&cq.tail) // close cq, so subsequent enqueues will fail
		cq.mu.Lock()
		if atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&cq.next))) != nil {
			cq.mu.Unlock()
			continue
		}
		ncq := pointerSCQPool.Get().(*pointerSCQ) // create a new queue
		ncq.Enqueue(data)
		// Try to add this new queue as cq.next.
		if atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(&cq.next)), nil, unsafe.Pointer(ncq)) {
			// Success.
			// Try to move cq.next into tail (we don't need to recheck since other enqueuers will help).
			atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(&q.tail)), unsafe.Pointer(cq), unsafe.Pointer(ncq))
			cq.mu.Unlock()
			return true
		}
		// CAS failed, so return this new SCQ to pointerSCQPool.
		// No other goroutines will access this queue.
		ncq.Dequeue()
		pointerSCQPool.Put(ncq)
		cq.mu.Unlock()
	}
}

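// newPointerSCQ returns an SCQ segment whose ring entries are all marked safe
// and empty with cycle 0. head and tail start at scqsize, one full cycle ahead
// of the ring's initial cycle, and threshold == -1 marks the segment as empty.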
func newPointerSCQ() *pointerSCQ {
	ring := new([scqsize]scqNodePointer)
	for i := range ring {
		ring[i].flags = 1<<63 + 1<<62 // newSCQFlags(true, true, 0)
	}
	return &pointerSCQ{
		head:      scqsize,
		tail:      scqsize,
		threshold: -1,
		ring:      ring,
	}
}

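// pointerSCQ is a single bounded ring segment. head and tail are free-running
// indices into ring (mapped to slots via cacheRemap16Byte), threshold is
// decremented by dequeuers and, once negative, marks the segment as empty, and
// next links to the segment that replaces this one after it is closed.
// The padding keeps the hot fields on separate cache lines.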
type pointerSCQ struct {
	_         [lscqcacheLineSize]byte
	head      uint64
	_         [lscqcacheLineSize - unsafe.Sizeof(new(uint64))]byte
	tail      uint64 // 1-bit finalize + 63-bit tail
	_         [lscqcacheLineSize - unsafe.Sizeof(new(uint64))]byte
	threshold int64
	_         [lscqcacheLineSize - unsafe.Sizeof(new(uint64))]byte
	next      *pointerSCQ
	ring      *[scqsize]scqNodePointer
	mu        sync.Mutex
}

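// scqNodePointer is a single ring entry. Its flags word packs the isSafe bit,
// the isEmpty bit and a 62-bit cycle counter; newSCQFlags and loadSCQFlags
// (defined elsewhere in this package) build and unpack that encoding.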
type scqNodePointer struct {
	flags uint64 // isSafe 1-bit + isEmpty 1-bit + cycle 62-bit
	data  unsafe.Pointer
}

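// Enqueue tries to place data in this segment. It returns false when the
// segment has been closed (the finalize bit in tail is set) or when the ring
// is full, in which case the caller falls back to PointerQueue.Enqueue's slow
// path.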
func (q *pointerSCQ) Enqueue(data unsafe.Pointer) bool {
	for {
		// Increment the TAIL, try to occupy an entry.
		tailvalue := atomic.AddUint64(&q.tail, 1)
		tailvalue -= 1 // we need the previous value
		T := uint64Get63(tailvalue)
		if uint64Get1(tailvalue) {
			// The queue is closed; return false so the caller
			// inserts this data into the next SCQ.
			return false
		}
		entAddr := &q.ring[cacheRemap16Byte(T)]
		cycleT := T / scqsize
	eqretry:
		// Enqueue does not need to load the data field: if the entry is empty,
		// we can assume its data is also empty.
		entFlags := atomic.LoadUint64(&entAddr.flags)
		isSafe, isEmpty, cycleEnt := loadSCQFlags(entFlags)
		if cycleEnt < cycleT && isEmpty && (isSafe || atomic.LoadUint64(&q.head) <= T) {
			// We can use this entry for the new data if
			// 1. the tail's cycle is bigger than the entry's cycle,
			// 2. the entry is empty, and
			// 3. the entry is safe or head <= T (there is enough room for this data).
			ent := scqNodePointer{flags: entFlags}
			newEnt := scqNodePointer{flags: newSCQFlags(true, false, cycleT), data: data}
			// Save the input data into this entry.
			if !compareAndSwapSCQNodePointer(entAddr, ent, newEnt) {
				// Failed, retry.
				goto eqretry
			}
			// Success.
			if atomic.LoadInt64(&q.threshold) != (int64(scqsize)*2)-1 {
				atomic.StoreInt64(&q.threshold, (int64(scqsize)*2)-1)
			}
			return true
		}
		// Full-queue check inside the loop (CAS2).
		if T+1 >= atomic.LoadUint64(&q.head)+scqsize {
			// T is the tail's value before the FAA(1); the latest tail is T+1.
			return false
		}
	}
}

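// Dequeue tries to consume an entry from this segment. It returns ok == false
// when the segment appears empty: either threshold is already negative, or the
// threshold runs out while racing with concurrent operations.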
func (q *pointerSCQ) Dequeue() (data unsafe.Pointer, ok bool) {
	if atomic.LoadInt64(&q.threshold) < 0 {
		// Empty queue.
		return
	}

	for {
		// Increment HEAD, try to consume an entry.
		H := atomic.AddUint64(&q.head, 1)
		H -= 1 // we need the previous value
		entAddr := &q.ring[cacheRemap16Byte(H)]
		cycleH := H / scqsize
	dqretry:
		ent := loadSCQNodePointer(unsafe.Pointer(entAddr))
		isSafe, isEmpty, cycleEnt := loadSCQFlags(ent.flags)
		if cycleEnt == cycleH { // same cycle, return this entry directly
			// 1. Clear the data in this slot.
			// 2. Set `isEmpty` to 1.
			atomicWriteBarrier(&entAddr.data)
			resetNode(unsafe.Pointer(entAddr))
			return ent.data, true
		}
		if cycleEnt < cycleH {
			var newEnt scqNodePointer
			if isEmpty {
				newEnt = scqNodePointer{flags: newSCQFlags(isSafe, true, cycleH)}
			} else {
				newEnt = scqNodePointer{flags: newSCQFlags(false, false, cycleEnt), data: ent.data}
			}
			if !compareAndSwapSCQNodePointer(entAddr, ent, newEnt) {
				goto dqretry
			}
		}
		// Check if the queue is empty.
		tailvalue := atomic.LoadUint64(&q.tail)
		T := uint64Get63(tailvalue)
		if T <= H+1 {
			// The tail has not passed this slot, so the queue is empty.
			// Catch the tail up and give up this attempt.
			q.fixstate(H + 1)
			atomic.AddInt64(&q.threshold, -1)
			return
		}
		if atomic.AddInt64(&q.threshold, -1)+1 <= 0 {
			return
		}
	}
}

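// fixstate repairs the segment after an empty dequeue: if the tail has fallen
// behind the head, it is CASed forward to the head so that later enqueues and
// dequeues see a consistent state.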
func (q *pointerSCQ) fixstate(originalHead uint64) {
	for {
		head := atomic.LoadUint64(&q.head)
		if originalHead < head {
			// The last dequeuer will be responsible for the fixstate.
			return
		}
		tailvalue := atomic.LoadUint64(&q.tail)
		if tailvalue >= head {
			// The queue has been closed, or it is in a normal state.
			return
		}
		if atomic.CompareAndSwapUint64(&q.tail, tailvalue, head) {
			return
		}
	}
}