github.com/loov/combiner@v0.1.0/spinning.go (about)

     1  package combiner
     2  
     3  import (
     4  	"runtime"
     5  )
     6  
     7  // Spinning is a combiner queue with spinning waiters.
     8  //
     9  // This implementation is useful when the batcher work is small
    10  // and there are few goroutines concurrently calling Do. A good example
    11  // would be a shared data-structure.
    12  //
    13  // If very high performance is required benchmark replacing Batcher
    14  // and argument with concrete implementation.
    15  //
    16  // Deprecated: This may cause problems with OS or runtime scheduler.
    17  // Use Parking implementation instead.
// Spinning is a combiner queue with spinning waiters.
//
// This implementation is useful when the batcher work is small
// and there are few goroutines concurrently calling Do. A good example
// would be a shared data-structure.
//
// If very high performance is required benchmark replacing Batcher
// and argument with concrete implementation.
//
// Deprecated: This may cause problems with OS or runtime scheduler.
// Use Parking implementation instead.
type Spinning struct {
	limit   int64   // max ops combined per batch; Do never hands off when 0 (count starts at 1)
	batcher Batcher // user-supplied batch executor (Start / Do / Finish)
	_       [5]uint64 // padding — presumably isolates head on its own cache line; TODO confirm
	head    nodeptr // lock-free intrusive list of waiter nodes, or the `locked` marker
	_       [7]uint64 // trailing padding — NOTE(review): assumed false-sharing guard; confirm
}
    25  
    26  // NewSpinning creates a spinning combiner with the given limit
    27  func NewSpinning(batcher Batcher, limit int) *Spinning {
    28  	q := &Spinning{}
    29  	q.Init(batcher, limit)
    30  	return q
    31  }
    32  
    33  // Init initializes a Spinning combiner.
    34  // Note: NewSpinning does this automatically.
    35  func (q *Spinning) Init(batcher Batcher, limit int) {
    36  	if limit < 0 {
    37  		panic("combiner limit must be positive")
    38  	}
    39  	q.batcher = batcher
    40  	q.limit = int64(limit)
    41  }
    42  
// Do passes arg safely to batcher and calls Start / Finish.
// The methods may be called in a different goroutine.
//
// Protocol: each caller pushes a stack-allocated node onto the lock-free
// list at q.head. A caller that finds the list empty takes the lock and
// becomes the combiner, executing its own argument plus those of queued
// waiters. A waiter spins on its node's next field until the combiner
// either marks it done (next == 0) or hands over the combiner role
// (next has handoffTag set, once q.limit operations were combined).
//go:nosplit
//go:noinline
func (q *Spinning) Do(arg interface{}) {
	var mynode node
	my := &mynode
	my.argument = arg
	// The node lives on this goroutine's stack yet is published to other
	// goroutines through q.head; KeepAlive pins it until Do returns.
	defer runtime.KeepAlive(my)

	var cmp nodeptr
	for {
		cmp = atomicLoadNodeptr(&q.head)
		// Empty queue: install the lock marker and become the combiner.
		xchg := locked
		if cmp != 0 {
			// Non-empty (a combiner is active): prepend our node.
			// The list ends in `locked`, since the first waiter links
			// to the lock marker the combiner installed.
			xchg = my.ref()
			my.next = cmp
		}
		if atomicCompareAndSwapNodeptr(&q.head, cmp, xchg) {
			break
		}
	}

	handoff := false
	if cmp != 0 {
		// We are a waiter.
		// busy wait
		for i := 0; i < 8; i++ {
			next := atomicLoadNodeptr(&my.next)
			if next == 0 {
				// Combiner executed our argument; nothing left to do.
				return
			}
			if next&handoffTag != 0 {
				// Combiner reached its limit and handed the role to us.
				// Clearing the tag restores next to the unprocessed
				// remainder of the list. Plain (non-atomic) write is
				// fine here: after tagging, only this goroutine
				// touches my.next.
				my.next &^= handoffTag
				handoff = true
				goto combining
			}
		}
		// yielding busy wait — same checks, but Gosched each round so
		// the spin does not starve other goroutines.
		for {
			next := atomicLoadNodeptr(&my.next)
			if next == 0 {
				return
			}
			if next&handoffTag != 0 {
				my.next &^= handoffTag
				handoff = true
				goto combining
			}
			runtime.Gosched()
		}
	}

combining:
	// We are the combiner: open the batch and run our own argument first.
	q.batcher.Start()
	q.batcher.Do(my.argument)
	count := int64(1)

	if handoff {
		// cmp still holds the value of q.head we linked behind, which
		// equals my.next (the tag only ever OR-ed into it), i.e. the
		// unprocessed tail of the list — resume combining there without
		// touching q.head.
		goto combine
	}

combinecheck:
	// Detach whatever waiters accumulated while we were combining.
	// If only the lock marker remains, release the queue (xchg == 0);
	// otherwise take the list and leave the queue locked.
	for {
		cmp = atomicLoadNodeptr(&q.head)
		var xchg uintptr = 0
		if cmp != locked {
			xchg = locked
		}

		if atomicCompareAndSwapNodeptr(&q.head, cmp, xchg) {
			break
		}
	}

	// No more operations to combine, return.
	if cmp == locked {
		q.batcher.Finish()
		return
	}

combine:
	// Execute the list of operations.
	for cmp != locked {
		other := nodeptrToNode(cmp)
		if count == q.limit {
			// Batch limit hit before processing this waiter: tag its
			// next pointer to hand it the combiner role (its argument
			// is still pending — it runs it itself after waking).
			atomicStoreNodeptr(&other.next, other.next|handoffTag)
			q.batcher.Finish()
			return
		}
		cmp = other.next

		q.batcher.Do(other.argument)
		count++
		// Mark completion; this releases the waiter spinning on next.
		// Note: other's stack node may vanish immediately after this
		// store, so other must not be dereferenced again.
		atomicStoreNodeptr(&other.next, 0)
	}

	goto combinecheck
}