github.com/songzhibin97/gkit@v1.2.13/structure/lscq/lscq.go

// Copyright 2021 ByteDance Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package lscq

import (
	"sync"
	"sync/atomic"
	"unsafe"
)

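// pointerSCQPool recycles bounded pointerSCQ ring segments so that Enqueue
// can grow the queue without allocating a fresh segment on every expansion.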
var pointerSCQPool = sync.Pool{
	New: func() interface{} {
		return newPointerSCQ()
	},
}

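// PointerQueue is an unbounded FIFO queue of unsafe.Pointer values, built as
// a linked list of bounded SCQ ring segments. The padding between head and
// tail keeps the two pointers on separate cache lines.
//
// A minimal usage sketch (illustrative caller-side code, not part of this file):
//
//	q := NewPointer()
//	v := 42
//	q.Enqueue(unsafe.Pointer(&v))
//	if p, ok := q.Dequeue(); ok {
//		_ = *(*int)(p) // 42
//	}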
type PointerQueue struct {
	head *pointerSCQ
	_    [cacheLineSize - unsafe.Sizeof(new(uintptr))]byte
	tail *pointerSCQ
}

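// NewPointer creates an empty PointerQueue backed by a single SCQ segment
// that serves as both head and tail.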
func NewPointer() *PointerQueue {
	q := newPointerSCQ()
	return &PointerQueue{head: q, tail: q}
}

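// Dequeue removes and returns the oldest value in the queue. It reports
// ok == false if the queue is empty. When the head segment is drained and a
// next segment exists, the head is advanced to that segment.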
func (q *PointerQueue) Dequeue() (data unsafe.Pointer, ok bool) {
	for {
		cq := (*pointerSCQ)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&q.head))))
		data, ok = cq.Dequeue()
		if ok {
			return
		}
		// cq does not have enough entries.
		nex := atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&cq.next)))
		if nex == nil {
			// There is no next SCQ; the queue is empty.
			return
		}
		// cq.next is not nil, so subsequent entries will be inserted into cq.next
		// instead of cq. Reset the threshold and retry once: if cq is still empty,
		// it can be unlinked from the queue.
		atomic.StoreInt64(&cq.threshold, int64(scqsize*2)-1)
		data, ok = cq.Dequeue()
		if ok {
			return
		}
		if atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(&q.head)), (unsafe.Pointer(cq)), nex) {
			// We can't guarantee that no other goroutine still holds a reference to cq
			// (it may be a previous dequeuer's cq), so it is not returned to the pool.
			cq = nil
		}
	}
}

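// Enqueue appends data to the queue. If the tail segment is full it is
// finalized, a fresh segment is taken from pointerSCQPool, and the tail is
// advanced to it. Enqueue always succeeds and returns true.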
func (q *PointerQueue) Enqueue(data unsafe.Pointer) bool {
	for {
		cq := (*pointerSCQ)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&q.tail))))
		nex := atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&cq.next)))
		if nex != nil {
			// Help move cq.next into tail.
			atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(&q.tail)), (unsafe.Pointer(cq)), nex)
			continue
		}
		if cq.Enqueue(data) {
			return true
		}
		// The current cq is full.
		atomicTestAndSetFirstBit(&cq.tail) // close cq, subsequent enqueues will fail
		cq.mu.Lock()
		if atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&cq.next))) != nil {
			cq.mu.Unlock()
			continue
		}
		ncq := pointerSCQPool.Get().(*pointerSCQ) // take a fresh SCQ from the pool
		ncq.Enqueue(data)
		// Try to link the new SCQ as cq.next.
		if atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(&cq.next)), nil, unsafe.Pointer(ncq)) {
			// Success.
			// Try to move the tail to cq.next (no recheck needed: other enqueuers will help).
			atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(&q.tail)), unsafe.Pointer(cq), unsafe.Pointer(ncq))
			cq.mu.Unlock()
			return true
		}
		// The CAS failed, so another goroutine linked its own SCQ first.
		// Drain ncq and put it back into the pool; no other goroutine can access it.
		ncq.Dequeue()
		pointerSCQPool.Put(ncq)
		cq.mu.Unlock()
	}
}

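// newPointerSCQ allocates a bounded SCQ ring segment whose entries all start
// as safe and empty (cycle 0), with head and tail set to scqsize and the
// threshold set to -1, which marks the segment as empty.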
func newPointerSCQ() *pointerSCQ {
	ring := new([scqsize]scqNodePointer)
	for i := range ring {
		ring[i].flags = 1<<63 + 1<<62 // newSCQFlags(true, true, 0)
	}
	return &pointerSCQ{
		head:      scqsize,
		tail:      scqsize,
		threshold: -1,
		ring:      ring,
	}
}

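// pointerSCQ is a single bounded ring segment. head, tail, and threshold are
// padded onto separate cache lines to avoid false sharing; next links to the
// segment that replaces this one once it is full and finalized.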
type pointerSCQ struct {
	_         [cacheLineSize]byte
	head      uint64
	_         [cacheLineSize - unsafe.Sizeof(new(uint64))]byte
	tail      uint64 // 1-bit finalize + 63-bit tail
	_         [cacheLineSize - unsafe.Sizeof(new(uint64))]byte
	threshold int64
	_         [cacheLineSize - unsafe.Sizeof(new(uint64))]byte
	next      *pointerSCQ
	ring      *[scqsize]scqNodePointer
	mu        sync.Mutex
}

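// scqNodePointer is one ring entry: a packed flags word plus the stored pointer.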
type scqNodePointer struct {
	flags uint64 // isSafe 1-bit + isEmpty 1-bit + cycle 62-bit
	data  unsafe.Pointer
}

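// Enqueue tries to store data in this segment. It returns false if the
// segment has been finalized or is full, in which case the caller moves on
// to the next segment.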
func (q *pointerSCQ) Enqueue(data unsafe.Pointer) bool {
	for {
		// Increment the TAIL, try to occupy an entry.
		tailvalue := atomic.AddUint64(&q.tail, 1)
		tailvalue -= 1 // we need the value before the increment
		T := uint64Get63(tailvalue)
		if uint64Get1(tailvalue) {
			// The queue has been finalized; return false so the caller
			// inserts this data into the next SCQ.
			return false
		}
		entAddr := &q.ring[cacheRemap16Byte(T)]
		cycleT := T / scqsize
	eqretry:
		// Enqueue does not need to load the data word: if the entry is marked
		// empty, its data can be treated as empty too.
		entFlags := atomic.LoadUint64(&entAddr.flags)
		isSafe, isEmpty, cycleEnt := loadSCQFlags(entFlags)
		if cycleEnt < cycleT && isEmpty && (isSafe || atomic.LoadUint64(&q.head) <= T) {
			// We can use this entry for the new data if
			// 1. the tail's cycle is bigger than the entry's cycle,
			// 2. the entry is empty, and
			// 3. the entry is safe, or head <= T (the head has not passed this position).
			ent := scqNodePointer{flags: entFlags}
			newEnt := scqNodePointer{flags: newSCQFlags(true, false, cycleT), data: data}
			// Save the input data into this entry.
			if !compareAndSwapSCQNodePointer(entAddr, ent, newEnt) {
				// Failed, retry with the current flags.
				goto eqretry
			}
			// Success.
			if atomic.LoadInt64(&q.threshold) != (int64(scqsize)*2)-1 {
				atomic.StoreInt64(&q.threshold, (int64(scqsize)*2)-1)
			}
			return true
		}
		// Full-queue check inside the loop (CAS2).
		if T+1 >= atomic.LoadUint64(&q.head)+scqsize {
			// T is the tail value before the FAA, so the latest tail is T+1.
			return false
		}
	}
}

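// Dequeue tries to take the oldest value from this segment. It returns
// ok == false when the segment is empty; the threshold counter bounds how
// long dequeuers keep scanning an apparently empty ring.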
func (q *pointerSCQ) Dequeue() (data unsafe.Pointer, ok bool) {
	if atomic.LoadInt64(&q.threshold) < 0 {
		// Empty queue.
		return
	}

	for {
		// Increment the HEAD, try to consume an entry.
		H := atomic.AddUint64(&q.head, 1)
		H -= 1 // we need the value before the increment
		entAddr := &q.ring[cacheRemap16Byte(H)]
		cycleH := H / scqsize
	dqretry:
		ent := loadSCQNodePointer(unsafe.Pointer(entAddr))
		isSafe, isEmpty, cycleEnt := loadSCQFlags(ent.flags)
		if cycleEnt == cycleH { // same cycle, return this entry directly
			// 1. Clear the data in this slot.
			// 2. Set `isEmpty` to 1.
			atomicWriteBarrier(&entAddr.data)
			resetNode(unsafe.Pointer(entAddr))
			return ent.data, true
		}
		if cycleEnt < cycleH {
			var newEnt scqNodePointer
			if isEmpty {
				newEnt = scqNodePointer{flags: newSCQFlags(isSafe, true, cycleH)}
			} else {
				newEnt = scqNodePointer{flags: newSCQFlags(false, false, cycleEnt), data: ent.data}
			}
			if !compareAndSwapSCQNodePointer(entAddr, ent, newEnt) {
				goto dqretry
			}
		}
		// Check whether the queue is empty.
		tailvalue := atomic.LoadUint64(&q.tail)
		T := uint64Get63(tailvalue)
		if T <= H+1 {
			// The head has caught up with the tail; fix the state and report empty.
			q.fixstate(H + 1)
			atomic.AddInt64(&q.threshold, -1)
			return
		}
		if atomic.AddInt64(&q.threshold, -1)+1 <= 0 {
			return
		}
	}
}

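// fixstate repairs the head/tail relationship after a dequeuer overshoots the
// tail: if the head has moved past the tail, the tail is CASed forward to the
// head so that the indices stay consistent.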
func (q *pointerSCQ) fixstate(originalHead uint64) {
	for {
		head := atomic.LoadUint64(&q.head)
		if originalHead < head {
			// The last dequeuer will be responsible for fixstate.
			return
		}
		tailvalue := atomic.LoadUint64(&q.tail)
		if tailvalue >= head {
			// The queue has been closed, or is in a normal state.
			return
		}
		if atomic.CompareAndSwapUint64(&q.tail, tailvalue, head) {
			return
		}
	}
}