package extcombiner

import (
	"sync/atomic"
	"unsafe"
)

// BoundedSpinning is a bounded spinning combiner queue.
//
// Threads push their argument onto an intrusive lock-free stack; whichever
// thread finds the stack empty becomes the "combiner" and executes everyone's
// operations in a batch, while the others spin-wait for completion. The
// combiner processes at most `limit` operations before handing the combiner
// role off to a waiting thread (bounding the time any one thread spends
// combining).
//
// Based on https://software.intel.com/en-us/blogs/2013/02/22/combineraggregator-synchronization-primitive
type BoundedSpinning struct {
	// head is the state word:
	//   nil                   -- idle, no combiner running
	//   boundedSpinningLocked -- a combiner is running, queue is empty
	//   *boundedSpinningNode  -- a combiner is running, list of pending nodes
	head unsafe.Pointer // *boundedSpinningNode
	// NOTE(review): padding after head — presumably to keep the contended
	// head word away from batcher/limit on its own cache line; confirm
	// intended line size (7*8 bytes here).
	_       [7]uint64
	batcher Batcher // user-supplied batch executor (declared elsewhere in this package)
	limit   int     // max operations one combiner executes before handing off
}

// boundedSpinningNode is one enqueued operation.
type boundedSpinningNode struct {
	// next links toward older nodes; it doubles as the completion flag:
	// the combiner stores nil here once the operation has been executed.
	next unsafe.Pointer // *boundedSpinningNode
	// handoff is set to 1 by a combiner that hit its limit, telling the
	// spinning owner of this node to take over combining from this node.
	handoff  int64
	argument interface{} // value passed through to Batcher.Do
}

// NewBoundedSpinning creates a BoundedSpinning queue.
//
// batcher receives the combined operations; limit bounds how many
// operations a single combiner executes before handing off.
func NewBoundedSpinning(batcher Batcher, limit int) *BoundedSpinning {
	return &BoundedSpinning{
		batcher: batcher,
		limit:   limit,
		head:    nil,
	}
}

// Sentinel marking "combiner active, no pending operations". It is a
// distinct static node so its address can never collide with a real node.
var boundedSpinningLockedElem = boundedSpinningNode{}
var boundedSpinningLockedNode = &boundedSpinningLockedElem
var boundedSpinningLocked = (unsafe.Pointer)(boundedSpinningLockedNode)

// Do passes value to Batcher and waits for completion
func (c *BoundedSpinning) Do(arg interface{}) {
	node := &boundedSpinningNode{argument: arg}

	// 1. CAS ourselves into the head: either become the combiner
	// (nil -> LOCKED) or push our node onto the pending list.
	var cmp unsafe.Pointer
	for {
		cmp = atomic.LoadPointer(&c.head)
		xchg := boundedSpinningLocked
		if cmp != nil {
			// There is already a combiner, enqueue itself.
			xchg = (unsafe.Pointer)(node)
			node.next = cmp
		}

		if atomic.CompareAndSwapPointer(&c.head, cmp, xchg) {
			break
		}
	}

	handoff := false
	if cmp != nil {
		// 2. If we are not the combiner, wait for arg.next to become nil
		// (which means the operation is finished).
		// spin is a backoff helper declared elsewhere in this package.
		for try := 0; ; spin(&try) {
			next := atomic.LoadPointer(&node.next)
			if next == nil {
				return
			}

			if atomic.LoadInt64(&node.handoff) == 1 {
				// start combining from the current position
				handoff = true
				break
			}
		}
	}

	// 3. We are the combiner.

	// First, execute own operation.
	c.batcher.Start()
	// Finish also runs on the early handoff returns below.
	defer c.batcher.Finish()

	var count int
	if !handoff {
		// Fresh combiner: our node was never enqueued, run it directly.
		c.batcher.Do(node.argument)
		count++
	} else {
		// Handoff: our node is still inside the grabbed list; execute
		// from it (our own argument included) until the LOCKED sentinel.
		// Execute the list of operations.
		for node != boundedSpinningLockedNode {
			if count == c.limit {
				// Budget exhausted: wake this node's owner as the
				// next combiner and bail out (deferred Finish runs).
				atomic.StoreInt64(&node.handoff, 1)
				return
			}
			// Read next before Do/StorePointer: storing nil releases
			// the node's owner, after which node must not be touched.
			next := (*boundedSpinningNode)(node.next)
			c.batcher.Do(node.argument)
			count++
			// Mark completion.
			atomic.StorePointer(&node.next, nil)
			node = next
		}
	}

	// Then, look for combining opportunities.
	for {
		for {
			cmp = atomic.LoadPointer(&c.head)
			// If there are some operations in the list,
			// grab the list and replace with LOCKED.
			// Otherwise, exchange to nil.
			var xchg unsafe.Pointer = nil
			if cmp != boundedSpinningLocked {
				xchg = boundedSpinningLocked
			}
			if atomic.CompareAndSwapPointer(&c.head, cmp, xchg) {
				break
			}
		}

		// No more operations to combine, return.
		if cmp == boundedSpinningLocked {
			break
		}

		node = (*boundedSpinningNode)(cmp)

		// Execute the list of operations.
		for node != boundedSpinningLockedNode {
			if count == c.limit {
				// Same bounded handoff as above: pass the combiner
				// role to this node's spinning owner.
				atomic.StoreInt64(&node.handoff, 1)
				return
			}
			next := (*boundedSpinningNode)(node.next)
			c.batcher.Do(node.argument)
			count++
			// Mark completion.
			atomic.StorePointer(&node.next, nil)
			node = next
		}
	}
}