github.com/loov/combiner@v0.1.0/extcombiner/bounded_parking_uintptr.go

package extcombiner

import (
	"sync"
	"sync/atomic"
	"unsafe"
)

// BoundedParkingUintptr is a bounded non-spinning combiner queue using uintptr internally
//
// Based on https://software.intel.com/en-us/blogs/2013/02/22/combineraggregator-synchronization-primitive
type BoundedParkingUintptr struct {
	head    uintptr   // *boundedParkingUintptrNode
	_       [7]uint64 // padding: keep head on its own cache line to avoid false sharing
	lock    sync.Mutex
	cond    sync.Cond
	_       [0]uint64
	batcher Batcher
	limit   int
}

type boundedParkingUintptrNode struct {
	next     uintptr // *boundedParkingUintptrNode
	argument interface{}
}

// NewBoundedParkingUintptr creates a BoundedParkingUintptr queue.
func NewBoundedParkingUintptr(batcher Batcher, limit int) *BoundedParkingUintptr {
	c := &BoundedParkingUintptr{
		batcher: batcher,
		limit:   limit,
		head:    0,
	}
	c.cond.L = &c.lock
	return c
}

const (
	// boundedParkingUintptrLocked is stored in head while a combiner is running
	// and no operations are queued; it also terminates the node list.
	boundedParkingUintptrLocked = uintptr(1)
	// boundedParkingUintptrHandoffTag is OR-ed into a node's next field to hand
	// the combiner role over to that node's goroutine.
	boundedParkingUintptrHandoffTag = uintptr(2)
)

// Do passes the value to the Batcher and waits for completion
func (c *BoundedParkingUintptr) Do(arg interface{}) {
	node := &boundedParkingUintptrNode{argument: arg}

	// 1. Either become the combiner (head was 0) or push this node onto the list.
	var cmp uintptr
	for {
		cmp = atomic.LoadUintptr(&c.head)
		xchg := boundedParkingUintptrLocked
		if cmp != 0 {
			// There is already a combiner, enqueue this node.
			xchg = uintptr(unsafe.Pointer(node))
			node.next = cmp
		}

		if atomic.CompareAndSwapUintptr(&c.head, cmp, xchg) {
			break
		}
	}

	count := 0
	handoff := false
	if cmp != 0 {
		// 2. If we are not the combiner, wait for node.next to become 0
		// (which means the operation is finished).
		c.lock.Lock()
		for {
			next := atomic.LoadUintptr(&node.next)
			if next == 0 {
				c.lock.Unlock()
				return
			}

			if next&boundedParkingUintptrHandoffTag != 0 {
				node.next &^= boundedParkingUintptrHandoffTag
				// The previous combiner hit its limit and handed the rest
				// of the list to this goroutine: do the combining ourselves.
				handoff = true
				break
			}
			c.cond.Wait()
		}
		c.lock.Unlock()
	}

	// 3. We are the combiner.

	// First, execute our own operation.
	c.batcher.Start()
	c.batcher.Do(node.argument)
	count++

	// Then, look for combining opportunities.
	for {
		if handoff { // using goto, to keep it similar to D. Vyukov's implementation
			handoff = false
			goto combiner
		}

		for {
			cmp = atomic.LoadUintptr(&c.head)
			// If there are some operations in the list,
			// grab the list and replace head with LOCKED.
			// Otherwise, reset head to 0, releasing the lock.
			var xchg uintptr
			if cmp != boundedParkingUintptrLocked {
				xchg = boundedParkingUintptrLocked
			}

			if atomic.CompareAndSwapUintptr(&c.head, cmp, xchg) {
				break
			}
		}

		// No more operations to combine, stop combining.
		if cmp == boundedParkingUintptrLocked {
			break
		}

	combiner:
		// Execute the list of operations.
		for cmp != boundedParkingUintptrLocked {
			node = (*boundedParkingUintptrNode)(unsafe.Pointer(cmp))
			if count == c.limit {
				// Batch limit reached: tag this node so its goroutine takes
				// over as the combiner for the rest of the list.
				atomic.StoreUintptr(&node.next, node.next|boundedParkingUintptrHandoffTag)
				c.batcher.Finish()

				c.lock.Lock()
				c.cond.Broadcast()
				c.lock.Unlock()
				return
			}
			cmp = node.next

			c.batcher.Do(node.argument)
			count++
			// Mark completion.
			atomic.StoreUintptr(&node.next, 0)
		}
	}

	c.batcher.Finish()

	// Wake all parked goroutines; their operations have been completed.
	c.lock.Lock()
	c.cond.Broadcast()
	c.lock.Unlock()
}
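
A minimal usage sketch (not part of the file above). It assumes the package's Batcher interface consists of exactly the Start, Do, and Finish methods called in this file; the sumBatcher type and the numbers below are illustrative only. Each goroutine calls Do with its argument and parks until some combiner has applied the operation.

package extcombiner_test

import (
	"fmt"
	"sync"

	"github.com/loov/combiner/extcombiner"
)

// sumBatcher accumulates int arguments between Start and Finish.
type sumBatcher struct {
	batch int64 // sum of the current batch
	total int64 // sum of all finished batches
}

func (b *sumBatcher) Start()             { b.batch = 0 }
func (b *sumBatcher) Do(arg interface{}) { b.batch += int64(arg.(int)) }
func (b *sumBatcher) Finish()            { b.total += b.batch }

func Example() {
	batcher := &sumBatcher{}
	queue := extcombiner.NewBoundedParkingUintptr(batcher, 8)

	var wg sync.WaitGroup
	for i := 1; i <= 100; i++ {
		wg.Add(1)
		go func(v int) {
			defer wg.Done()
			queue.Do(v) // parks until a combiner has executed this operation
		}(i)
	}
	wg.Wait()

	fmt.Println(batcher.total)
	// Output: 5050
}

Because the queue is bounded, a combiner executes at most limit (here 8) operations before tagging the next node and handing the combiner role to that node's goroutine, so no single caller is stuck combining indefinitely.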