github.com/bytedance/gopkg@v0.0.0-20240514070511-01b2cbcf35e1/collection/lscq/lscq.go

// Copyright 2021 ByteDance Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package lscq

import (
	"sync"
	"sync/atomic"
	"unsafe"
)

var pointerSCQPool = sync.Pool{
	New: func() interface{} {
		return newPointerSCQ()
	},
}

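// PointerQueue is a concurrent, unbounded FIFO queue of unsafe.Pointer values.
// It is built as a linked list of fixed-size SCQ ring segments: elements are
// consumed from the segment at head and produced into the segment at tail.
// The padding keeps head and tail on separate cache lines to avoid false sharing.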
type PointerQueue struct {
	head *pointerSCQ
	_    [cacheLineSize - unsafe.Sizeof(new(uintptr))]byte
	tail *pointerSCQ
}

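// NewPointer creates an empty PointerQueue with a single SCQ segment acting as
// both head and tail. A minimal usage sketch (illustrative only; the caller is
// responsible for keeping the pointed-to value alive while it is queued):
//
//	q := NewPointer()
//	v := new(int)
//	*v = 42
//	q.Enqueue(unsafe.Pointer(v))
//	if p, ok := q.Dequeue(); ok {
//		println(*(*int)(p)) // 42
//	}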
func NewPointer() *PointerQueue {
	q := newPointerSCQ()
	return &PointerQueue{head: q, tail: q}
}

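// Dequeue removes and returns the element at the front of the queue.
// ok is false if the queue is empty.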
func (q *PointerQueue) Dequeue() (data unsafe.Pointer, ok bool) {
	for {
		cq := (*pointerSCQ)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&q.head))))
		data, ok = cq.Dequeue()
		if ok {
			return
		}
		// cq does not have enough entries.
		nex := atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&cq.next)))
		if nex == nil {
			// There is no next SCQ.
			return
		}
		// cq.next is not nil, so subsequent entries will be inserted into cq.next
		// instead of cq. Once cq is confirmed empty we can retire it and advance
		// head to cq.next. Reset the threshold so the Dequeue below can drain
		// any entries still left in cq.
		atomic.StoreInt64(&cq.threshold, int64(scqsize*2)-1)
		data, ok = cq.Dequeue()
		if ok {
			return
		}
		if atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(&q.head)), (unsafe.Pointer(cq)), nex) {
			// We can't be sure that no other goroutine still accesses cq
			// (it may have been loaded by an earlier dequeuer), so it is
			// dropped rather than returned to the pool.
			cq = nil
		}
	}
}

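// Enqueue appends data to the back of the queue. When the current tail segment
// is full or has been finalized, a new segment is taken from the pool and
// linked behind it, so the queue grows without bound.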
func (q *PointerQueue) Enqueue(data unsafe.Pointer) bool {
	for {
		cq := (*pointerSCQ)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&q.tail))))
		nex := atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&cq.next)))
		if nex != nil {
			// Help move cq.next into the tail.
			atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(&q.tail)), (unsafe.Pointer(cq)), nex)
			continue
		}
		if cq.Enqueue(data) {
			return true
		}
		// The current cq is full.
		atomicTestAndSetFirstBit(&cq.tail) // finalize cq; subsequent enqueues on it will fail
		cq.mu.Lock()
		if atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&cq.next))) != nil {
			cq.mu.Unlock()
			continue
		}
		ncq := pointerSCQPool.Get().(*pointerSCQ) // get a new (or recycled) SCQ
		ncq.Enqueue(data)
		// Try to link this new SCQ as cq.next.
		if atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(&cq.next)), nil, unsafe.Pointer(ncq)) {
			// Success.
			// Try to move cq.next into the tail (no need to recheck, other enqueuers will help).
			atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(&q.tail)), unsafe.Pointer(cq), unsafe.Pointer(ncq))
			cq.mu.Unlock()
			return true
		}
		// CAS failed: another goroutine linked its own SCQ first.
		// Drain the entry we just added and return this SCQ to the pool;
		// no other goroutine can access it.
		ncq.Dequeue()
		pointerSCQPool.Put(ncq)
		cq.mu.Unlock()
	}
}

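// newPointerSCQ allocates a single SCQ segment whose slots all start as
// (isSafe=1, isEmpty=1, cycle=0). head and tail start at scqsize, so the first
// indices used belong to cycle 1 rather than the initial slot cycle 0, and
// threshold=-1 marks the segment as empty.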
func newPointerSCQ() *pointerSCQ {
	ring := new([scqsize]scqNodePointer)
	for i := range ring {
		ring[i].flags = 1<<63 + 1<<62 // newSCQFlags(true, true, 0)
	}
	return &pointerSCQ{
		head:      scqsize,
		tail:      scqsize,
		threshold: -1,
		ring:      ring,
	}
}

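// pointerSCQ is one bounded ring segment of the queue. head and tail are
// monotonically increasing indices that are mapped into ring by
// cacheRemap16Byte; threshold bounds how many failed dequeue attempts may run
// before the segment is reported empty; next points to the segment that
// replaces this one once it fills up. The padding separates the hot fields
// onto their own cache lines.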
type pointerSCQ struct {
	_         [cacheLineSize]byte
	head      uint64
	_         [cacheLineSize - unsafe.Sizeof(new(uint64))]byte
	tail      uint64 // 1-bit finalize + 63-bit tail
	_         [cacheLineSize - unsafe.Sizeof(new(uint64))]byte
	threshold int64
	_         [cacheLineSize - unsafe.Sizeof(new(uint64))]byte
	next      *pointerSCQ
	ring      *[scqsize]scqNodePointer
	mu        sync.Mutex
}

type scqNodePointer struct {
	flags uint64 // isSafe 1-bit + isEmpty 1-bit + cycle 62-bit
	data  unsafe.Pointer
}

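// The flag helpers used below (newSCQFlags, loadSCQFlags, uint64Get1,
// uint64Get63) are defined elsewhere in this package. As a rough sketch of the
// packing they imply, assuming isSafe occupies bit 63 and isEmpty bit 62
// (matching the 1<<63 + 1<<62 initialisation above):
//
//	flags := cycle // low 62 bits
//	if isSafe {
//		flags |= 1 << 63
//	}
//	if isEmpty {
//		flags |= 1 << 62
//	}

// Enqueue tries to place data into this segment. It returns false if the
// segment has been finalized or is full; the caller (PointerQueue.Enqueue)
// then appends a new segment.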
func (q *pointerSCQ) Enqueue(data unsafe.Pointer) bool {
	atomic.LoadPointer(&data) // make data escape to the heap
	for {
		// Increment the TAIL, try to occupy an entry.
		tailvalue := atomic.AddUint64(&q.tail, 1)
		tailvalue -= 1 // we need the value before the increment
		T := uint64Get63(tailvalue)
		if uint64Get1(tailvalue) {
			// The queue has been finalized (closed); return false so the caller
			// inserts this data into the next SCQ instead.
			return false
		}
		entAddr := &q.ring[cacheRemap16Byte(T)]
		cycleT := T / scqsize
	eqretry:
		// Enqueue does not need the old data: if this entry is marked empty,
		// we can assume its data field is also empty.
		entFlags := atomic.LoadUint64(&entAddr.flags)
		isSafe, isEmpty, cycleEnt := loadSCQFlags(entFlags)
		if cycleEnt < cycleT && isEmpty && (isSafe || atomic.LoadUint64(&q.head) <= T) {
			// We can use this entry for adding new data if
			// 1. Tail's cycle is bigger than the entry's cycle.
			// 2. It is empty.
			// 3. It is safe, or tail >= head (there is enough space for this data).
			ent := scqNodePointer{flags: entFlags}
			newEnt := scqNodePointer{flags: newSCQFlags(true, false, cycleT), data: data}
			// Save the input data into this entry.
			if !compareAndSwapSCQNodePointer(entAddr, ent, newEnt) {
				// CAS failed, retry this entry.
				goto eqretry
			}
			// Success.
			// Reset the threshold so concurrent dequeuers see a non-empty queue.
			if atomic.LoadInt64(&q.threshold) != (int64(scqsize)*2)-1 {
				atomic.StoreInt64(&q.threshold, (int64(scqsize)*2)-1)
			}
			return true
		}
		// Full-queue check inside the loop (CAS2 version).
		if T+1 >= atomic.LoadUint64(&q.head)+scqsize {
			// T is the tail value before the FAA(1); the latest tail is T+1.
			return false
		}
	}
}

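// Dequeue tries to take an element from this segment. ok is false when the
// segment appears empty (its threshold is exhausted); the caller may then move
// on to the next segment.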
func (q *pointerSCQ) Dequeue() (data unsafe.Pointer, ok bool) {
	if atomic.LoadInt64(&q.threshold) < 0 {
		// Empty queue.
		return
	}

	for {
		// Increment the HEAD, try to consume an entry.
		H := atomic.AddUint64(&q.head, 1)
		H -= 1 // we need the value before the increment
		entAddr := &q.ring[cacheRemap16Byte(H)]
		cycleH := H / scqsize
	dqretry:
		ent := loadSCQNodePointer(unsafe.Pointer(entAddr))
		isSafe, isEmpty, cycleEnt := loadSCQFlags(ent.flags)
		if cycleEnt == cycleH { // same cycle, return this entry directly
			// 1. Clear the data in this slot.
			// 2. Set `isEmpty` to 1.
			atomicWriteBarrier(&entAddr.data)
			resetNode(unsafe.Pointer(entAddr))
			return ent.data, true
		}
		if cycleEnt < cycleH {
			// The entry belongs to an older cycle.
			var newEnt scqNodePointer
			if isEmpty {
				// Advance the empty entry to this cycle, keeping its safe bit.
				newEnt = scqNodePointer{flags: newSCQFlags(isSafe, true, cycleH)}
			} else {
				// Keep the old data but mark the entry unsafe.
				newEnt = scqNodePointer{flags: newSCQFlags(false, false, cycleEnt), data: ent.data}
			}
			if !compareAndSwapSCQNodePointer(entAddr, ent, newEnt) {
				goto dqretry
			}
		}
		// Check if the queue is empty.
		tailvalue := atomic.LoadUint64(&q.tail)
		T := uint64Get63(tailvalue)
		if T <= H+1 {
			// Inconsistent state: tail is not ahead of head, so the queue is empty.
			q.fixstate(H + 1)
			atomic.AddInt64(&q.threshold, -1)
			return
		}
		if atomic.AddInt64(&q.threshold, -1)+1 <= 0 {
			return
		}
	}
}

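// fixstate is called when a dequeue attempt finds tail at or behind head: it
// advances tail up to head with a CAS so the indices stay consistent after
// dequeues on an empty segment.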
func (q *pointerSCQ) fixstate(originalHead uint64) {
	for {
		head := atomic.LoadUint64(&q.head)
		if originalHead < head {
			// Another dequeuer has moved head further; the last dequeuer
			// will be responsible for fixstate.
			return
		}
		tailvalue := atomic.LoadUint64(&q.tail)
		if tailvalue >= head {
			// The queue has been closed, or is already in a normal state.
			return
		}
		if atomic.CompareAndSwapUint64(&q.tail, tailvalue, head) {
			return
		}
	}
}