github.com/loov/combiner@v0.1.0/extcombiner/bounded_spinning_uinptr.go

package extcombiner

import (
	"sync/atomic"
	"unsafe"
)

// BoundedSpinningUintptr is a bounded spinning combiner queue using uintptr internally
//
// Based on https://software.intel.com/en-us/blogs/2013/02/22/combineraggregator-synchronization-primitive
type BoundedSpinningUintptr struct {
	head    uintptr   // *boundedSpinningUintptrNode
	_       [7]uint64 // padding, presumably to keep head on its own cache line
	batcher Batcher
	limit   int
}

type boundedSpinningUintptrNode struct {
	next     uintptr // *boundedSpinningUintptrNode
	argument interface{}
}

// NewBoundedSpinningUintptr creates a BoundedSpinningUintptr queue.
func NewBoundedSpinningUintptr(batcher Batcher, limit int) *BoundedSpinningUintptr {
	return &BoundedSpinningUintptr{
		batcher: batcher,
		limit:   limit,
		head:    0,
	}
}
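
// A minimal usage sketch (illustrative only, not part of the original file),
// assuming a Batcher implementation such as the sumBatcher shown at the end
// of this file:
//
//	q := NewBoundedSpinningUintptr(&sumBatcher{}, 64)
//	var wg sync.WaitGroup
//	for i := 0; i < 4; i++ {
//		wg.Add(1)
//		go func(v int) {
//			defer wg.Done()
//			q.Do(v) // blocks until some combiner has executed this operation
//		}(i)
//	}
//	wg.Wait()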

const (
	boundedSpinningUintptrLocked     = uintptr(1)
	boundedSpinningUintptrHandoffTag = uintptr(2)
)
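
// head holds one of three kinds of values: 0 (unlocked, no combiner active),
// boundedSpinningUintptrLocked (a combiner is running and the list is empty),
// or a pointer (stored as uintptr) to the first queued boundedSpinningUintptrNode.
// boundedSpinningUintptrHandoffTag is ORed into a node's next field when the
// combiner wants that node's owner to take over combining.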

// Do passes the value to the Batcher and waits for the operation to complete.
func (c *BoundedSpinningUintptr) Do(arg interface{}) {
	node := &boundedSpinningUintptrNode{argument: arg}

	// 1. Either become the combiner (when head == 0) or enqueue this node.
	var cmp uintptr
	for {
		cmp = atomic.LoadUintptr(&c.head)
		xchg := boundedSpinningUintptrLocked
		if cmp != 0 {
			// There is already a combiner; enqueue this node onto the list.
			xchg = uintptr(unsafe.Pointer(node))
			node.next = cmp
		}

		if atomic.CompareAndSwapUintptr(&c.head, cmp, xchg) {
			break
		}
	}
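
	// At this point cmp holds the previous value of head: 0 means the CAS
	// installed LOCKED and this goroutine is now the combiner; otherwise the
	// node is on the list and we must wait for a combiner to process it.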

	count := 0
	handoff := false
	if cmp != 0 {
		// 2. If we are not the combiner, wait for node.next to become 0
		// (which means the operation has been completed by a combiner).
		for try := 0; ; spin(&try) {
			next := atomic.LoadUintptr(&node.next)
			if next == 0 {
				return
			}

			if next&boundedSpinningUintptrHandoffTag != 0 {
				// The combiner handed the rest of the list off to us:
				// clear the tag and take over combining below.
				node.next &^= boundedSpinningUintptrHandoffTag
				handoff = true
				break
			}
		}
	}

	// 3. We are the combiner: either we acquired the lock directly above,
	// or a previous combiner handed its remaining list off to us.

	// First, execute own operation.
	c.batcher.Start()
	defer c.batcher.Finish()
	c.batcher.Do(node.argument)
	count++

	// Then, look for combining opportunities.
	for {
		if handoff { // using goto, to keep it similar to D. Vyukov's implementation
			handoff = false
			goto combiner
		}

		for {
			cmp = atomic.LoadUintptr(&c.head)
			// If there are some operations in the list,
			// grab the list and replace it with LOCKED.
			// Otherwise, exchange to nil.
			var xchg uintptr
			if cmp != boundedSpinningUintptrLocked {
				xchg = boundedSpinningUintptrLocked
			}

			if atomic.CompareAndSwapUintptr(&c.head, cmp, xchg) {
				break
			}
		}
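
		// cmp now holds the value that was in head: LOCKED means the list was
		// empty (we stored 0, releasing the combiner lock); anything else is
		// the first node of the list we just detached, leaving head LOCKED.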

		// No more operations to combine, return.
		if cmp == boundedSpinningUintptrLocked {
			break
		}

	combiner:
		// Execute the list of operations.
		for cmp != boundedSpinningUintptrLocked {
			node = (*boundedSpinningUintptrNode)(unsafe.Pointer(cmp))
			if count == c.limit {
				// Combining budget exhausted: tag this node so that its
				// owner, still spinning in step 2, takes over as combiner.
				atomic.StoreUintptr(&node.next, node.next|boundedSpinningUintptrHandoffTag)
				return
			}
			cmp = node.next

			c.batcher.Do(node.argument)
			count++
			// Mark completion; the owner's spin loop sees 0 and returns.
			atomic.StoreUintptr(&node.next, 0)
		}
	}
}
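
// Illustrative sketch, not part of the original file: a minimal Batcher that
// sums int arguments, assuming the package's Batcher interface is exactly the
// Start/Do/Finish trio invoked by Do above. Fields are only touched by the
// goroutine that currently holds the combiner role, so no extra locking is
// needed here.
type sumBatcher struct {
	pending int64 // accumulated during the current combining session
	total   int64 // grand total, updated once per session in Finish
}

func (b *sumBatcher) Start() { b.pending = 0 }

// Do assumes every argument passed to BoundedSpinningUintptr.Do is an int.
func (b *sumBatcher) Do(arg interface{}) { b.pending += int64(arg.(int)) }

func (b *sumBatcher) Finish() { b.total += b.pending }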