github.com/egonelbre/exp@v0.0.0-20240430123955-ed1d3aa93911/combiner/tbbsu.go (about)

     1  package combiner
     2  
     3  import (
     4  	"sync"
     5  	"sync/atomic"
     6  	"unsafe"
     7  )
     8  
// based on https://software.intel.com/en-us/blogs/2013/02/22/combineraggregator-synchronization-primitive
//
// TBBSleepyUintptr is a combiner/aggregator primitive: callers push
// requests onto a lock-free stack; the goroutine that pushes onto an
// empty stack becomes the combiner and executes the whole batch, while
// the others sleep on cond until their request has been executed
// (the "sleepy" variant — waiters block instead of spinning).
type TBBSleepyUintptr struct {
	head uintptr // *tbbNodeSleepyUintptr; top of the request stack, 0 when empty
	_    [7]uint64 // padding so head sits alone on its cache line
	lock sync.Mutex // guards cond; paired with cond for sleeping waiters/combiners
	cond sync.Cond // broadcast when a batch finishes and combiner status is released
	// cacheline boundary
	batcher Batcher // receives the combined batch via Start/Include/Finish
	busy    int64 // 1 while some goroutine is acting as the combiner
}
    19  
// tbbNodeSleepyUintptr is one queued request. The next field doubles as
// the completion flag: the combiner stores 0 into it after executing
// the operation, which is what sleeping waiters poll for.
type tbbNodeSleepyUintptr struct {
	next     uintptr // *tbbNodeSleepyUintptr; 0 once the operation has completed
	argument interface{}
}
    24  
    25  func NewTBBSleepyUintptr(batcher Batcher) *TBBSleepyUintptr {
    26  	c := &TBBSleepyUintptr{
    27  		batcher: batcher,
    28  		head:    0,
    29  	}
    30  	c.cond.L = &c.lock
    31  	return c
    32  }
    33  
    34  func (c *TBBSleepyUintptr) Do(arg interface{}) {
    35  	node := &tbbNodeSleepyUintptr{argument: arg}
    36  
    37  	var cmp uintptr
    38  	for {
    39  		cmp = atomic.LoadUintptr(&c.head)
    40  		node.next = cmp
    41  		if atomic.CompareAndSwapUintptr(&c.head, cmp, uintptr(unsafe.Pointer(node))) {
    42  			break
    43  		}
    44  	}
    45  
    46  	if cmp != 0 {
    47  		// 2. If we are not the combiner, wait for arg.next to become nil
    48  		// (which means the operation is finished).
    49  		c.lock.Lock()
    50  		for {
    51  			if atomic.LoadUintptr(&node.next) == 0 {
    52  				c.lock.Unlock()
    53  				return
    54  			}
    55  			c.cond.Wait()
    56  		}
    57  		c.lock.Unlock()
    58  	} else {
    59  		// 3. We are the combiner.
    60  
    61  		// wait for previous combiner to finish
    62  		c.lock.Lock()
    63  		for {
    64  			if atomic.LoadInt64(&c.busy) != 1 {
    65  				break
    66  			}
    67  			c.cond.Wait()
    68  		}
    69  		atomic.StoreInt64(&c.busy, 1)
    70  		c.lock.Unlock()
    71  
    72  		// First, execute own operation.
    73  		c.batcher.Start()
    74  
    75  		// Grab the batch of operations only once
    76  		for {
    77  			cmp = atomic.LoadUintptr(&c.head)
    78  			if atomic.CompareAndSwapUintptr(&c.head, cmp, 0) {
    79  				break
    80  			}
    81  		}
    82  
    83  		node = (*tbbNodeSleepyUintptr)(unsafe.Pointer(cmp))
    84  		// Execute the list of operations.
    85  		for node != nil {
    86  			next := (*tbbNodeSleepyUintptr)(unsafe.Pointer(node.next))
    87  			c.batcher.Include(node.argument)
    88  			// Mark completion.
    89  			atomic.StoreUintptr(&node.next, 0)
    90  			node = next
    91  		}
    92  
    93  		c.batcher.Finish()
    94  
    95  		// allow next combiner to proceed
    96  		c.lock.Lock()
    97  		atomic.StoreInt64(&c.busy, 0)
    98  		c.cond.Broadcast()
    99  		c.lock.Unlock()
   100  	}
   101  }