github.com/loov/combiner@v0.1.0/extcombiner/bounded_parking.go

package extcombiner

import (
	"sync"
	"sync/atomic"
	"unsafe"
)

// BoundedParking is a bounded non-spinning combiner queue.
//
// Based on https://software.intel.com/en-us/blogs/2013/02/22/combineraggregator-synchronization-primitive
type BoundedParking struct {
	head    unsafe.Pointer // *boundedParkingNode
	_       [7]uint64      // padding to keep the CAS'd head on its own cache line
	lock    sync.Mutex
	cond    sync.Cond
	_       [0]uint64
	batcher Batcher
	limit   int
}

type boundedParkingNode struct {
	next     unsafe.Pointer // *boundedParkingNode
	handoff  int64
	argument interface{}
}

// NewBoundedParking creates a BoundedParking queue.
func NewBoundedParking(batcher Batcher, limit int) *BoundedParking {
	c := &BoundedParking{
		batcher: batcher,
		limit:   limit,
		head:    nil,
	}
	c.cond.L = &c.lock
	return c
}

// boundedParkingLocked is a sentinel head value meaning "a combiner is
// running, but no operations are queued". It also terminates the
// intrusive list of queued nodes.
var boundedParkingLockedElem = boundedParkingNode{}
var boundedParkingLockedNode = &boundedParkingLockedElem
var boundedParkingLocked = (unsafe.Pointer)(boundedParkingLockedNode)

// Do passes value to Batcher and waits for completion.
func (c *BoundedParking) Do(arg interface{}) {
	node := &boundedParkingNode{argument: arg}

	// 1. Become the combiner (nil -> LOCKED), or enqueue onto the list.
	var cmp unsafe.Pointer
	for {
		cmp = atomic.LoadPointer(&c.head)
		xchg := boundedParkingLocked
		if cmp != nil {
			// There is already a combiner, enqueue itself.
			xchg = (unsafe.Pointer)(node)
			node.next = cmp
		}

		if atomic.CompareAndSwapPointer(&c.head, cmp, xchg) {
			break
		}
	}

	handoff := false
	if cmp != nil {
		// 2. If we are not the combiner, wait for node.next to become nil
		// (which means the operation is finished).
		c.lock.Lock()
		for {
			next := atomic.LoadPointer(&node.next)
			if next == nil {
				c.lock.Unlock()
				return
			}
			if atomic.LoadInt64(&node.handoff) == 1 {
				// start combining from the current position
				handoff = true
				break
			}
			c.cond.Wait()
		}
		c.lock.Unlock()
	}

	// 3. We are the combiner.

	// First, execute own operation.
	c.batcher.Start()

	var count int
	if !handoff {
		c.batcher.Do(node.argument)
		count++
	} else {
		// Execute the list of operations, starting from our own node.
		for node != boundedParkingLockedNode {
			if count == c.limit {
				atomic.StoreInt64(&node.handoff, 1)
				c.batcher.Finish()

				c.lock.Lock()
				c.cond.Broadcast()
				c.lock.Unlock()
				return
			}
			next := (*boundedParkingNode)(node.next)
			c.batcher.Do(node.argument)
			count++
			// Mark completion.
			atomic.StorePointer(&node.next, nil)
			node = next
		}
	}

	// Then, look for combining opportunities.
	for {
		for {
			cmp = atomic.LoadPointer(&c.head)
			// If there are some operations in the list,
			// grab the list and replace with LOCKED.
			// Otherwise, exchange to nil.
			var xchg unsafe.Pointer = nil
			if cmp != boundedParkingLocked {
				xchg = boundedParkingLocked
			}
			if atomic.CompareAndSwapPointer(&c.head, cmp, xchg) {
				break
			}
		}

		// No more operations to combine, return.
		if cmp == boundedParkingLocked {
			break
		}

		node = (*boundedParkingNode)(cmp)

		// Execute the list of operations.
		for node != boundedParkingLockedNode {
			if count == c.limit {
				atomic.StoreInt64(&node.handoff, 1)
				c.batcher.Finish()

				c.lock.Lock()
				c.cond.Broadcast()
				c.lock.Unlock()
				return
			}
			next := (*boundedParkingNode)(node.next)
			c.batcher.Do(node.argument)
			count++
			// Mark completion.
			atomic.StorePointer(&node.next, nil)
			node = next
		}
	}

	c.batcher.Finish()

	c.lock.Lock()
	c.cond.Broadcast()
	c.lock.Unlock()
}
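
// ---------------------------------------------------------------------------
// Usage sketch (not part of the original file): a minimal Batcher that sums
// int64 arguments, to illustrate how BoundedParking is driven. It assumes
// Batcher is this package's interface with Start, Do(interface{}), and
// Finish methods, as the calls in (*BoundedParking).Do above imply. The
// sumBatcher type and ExampleNewBoundedParking function are hypothetical
// names and would live in a _test.go file with "fmt" imported alongside
// "sync".
//
// Only one goroutine acts as combiner at a time (ordered by the CAS on head
// and the atomic handoff flag), so sumBatcher may mutate total without any
// locking of its own.

type sumBatcher struct {
	total int64 // mutated only by the current combiner
}

func (b *sumBatcher) Start()             {}
func (b *sumBatcher) Do(arg interface{}) { b.total += arg.(int64) }
func (b *sumBatcher) Finish()            {}

func ExampleNewBoundedParking() {
	batcher := &sumBatcher{}
	// With limit 8, a combiner executes at most 8 operations before
	// handing the rest of the list off to a parked waiter, so no single
	// caller is stuck combining indefinitely.
	queue := NewBoundedParking(batcher, 8)

	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			queue.Do(int64(1)) // returns once some combiner has applied this op
		}()
	}
	wg.Wait()

	fmt.Println(batcher.total)
	// Output: 100
}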