github.com/egonelbre/exp@v0.0.0-20240430123955-ed1d3aa93911/combiner/tbbu.go

package combiner

import (
	"sync/atomic"
	"unsafe"
)

// based on https://software.intel.com/en-us/blogs/2013/02/22/combineraggregator-synchronization-primitive
type TBBUintptr struct {
	head    uintptr // *tbbNodeUintptr
	_       [7]uint64
	batcher Batcher
	busy    int64
}

type tbbNodeUintptr struct {
	next     uintptr // *tbbNodeUintptr
	argument interface{}
}

func NewTBBUintptr(batcher Batcher) *TBBUintptr {
	return &TBBUintptr{
		batcher: batcher,
		head:    0,
	}
}

func (c *TBBUintptr) Do(arg interface{}) {
	node := &tbbNodeUintptr{argument: arg}

	// 1. Prepend the operation to the list of pending operations.
	var cmp uintptr
	for {
		cmp = atomic.LoadUintptr(&c.head)
		node.next = cmp
		if atomic.CompareAndSwapUintptr(&c.head, cmp, uintptr(unsafe.Pointer(node))) {
			break
		}
	}

	if cmp != 0 {
		// 2. If we are not the combiner, wait for node.next to become 0
		// (which means the operation has finished).
		for try := 0; atomic.LoadUintptr(&node.next) != 0; spin(&try) {
		}
	} else {
		// 3. We are the combiner.

		// Wait for the previous combiner to finish.
		for try := 0; atomic.LoadInt64(&c.busy) == 1; spin(&try) {
		}
		atomic.StoreInt64(&c.busy, 1)

		// Start the batch; our own operation is executed below together
		// with the rest of the grabbed list.
		c.batcher.Start()
		defer c.batcher.Finish()

		// Grab the batch of operations only once.
		for {
			cmp = atomic.LoadUintptr(&c.head)
			if atomic.CompareAndSwapUintptr(&c.head, cmp, 0) {
				break
			}
		}

		node = (*tbbNodeUintptr)(unsafe.Pointer(cmp))
		// Execute the list of operations.
		for node != nil {
			next := (*tbbNodeUintptr)(unsafe.Pointer(node.next))
			c.batcher.Include(node.argument)
			// Mark completion.
			atomic.StoreUintptr(&node.next, 0)
			node = next
		}

		// Allow the next combiner to proceed.
		atomic.StoreInt64(&c.busy, 0)
	}
}
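
A minimal usage sketch (not part of this repository), assuming Batcher is declared elsewhere in the combiner package with the Start/Include/Finish shape used above, that spin is a package-internal backoff helper, and that the import path matches the module path in the header. sumBatcher is a hypothetical Batcher that sums int arguments; because the busy flag serializes combiners, Start/Include/Finish only ever run on the current combiner goroutine, so the batcher itself needs no locking.

package main

import (
	"fmt"
	"sync"

	"github.com/egonelbre/exp/combiner"
)

// sumBatcher is a hypothetical Batcher implementation that adds up int arguments.
type sumBatcher struct {
	total int64
}

func (b *sumBatcher) Start()                  {}
func (b *sumBatcher) Include(arg interface{}) { b.total += int64(arg.(int)) }
func (b *sumBatcher) Finish()                 {}

func main() {
	b := &sumBatcher{}
	c := combiner.NewTBBUintptr(b)

	var wg sync.WaitGroup
	for i := 1; i <= 100; i++ {
		wg.Add(1)
		go func(v int) {
			defer wg.Done()
			// Do returns only after some combiner has included v in a batch:
			// either this goroutine became the combiner itself, or it waited
			// for its node.next to be cleared.
			c.Do(v)
		}(i)
	}
	wg.Wait()

	fmt.Println(b.total) // 5050
}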