github.com/haraldrudell/parl@v0.4.176/awaitable.go

/*
© 2023-present Harald Rudell <haraldrudell@proton.me> (https://haraldrudell.github.io/haraldrudell/)
All rights reserved
*/

package parl

import "sync/atomic"

// Awaitable is a semaphore allowing any number of threads to observe
// and await any number of events in parallel
//   - [Awaitable.Ch] returns an awaitable channel that closes when the awaitable is triggered.
//     The initial channel state is open
//   - [Awaitable.Close] triggers the awaitable, ie. closes the channel.
//     Upon return, the channel is guaranteed to be closed
//   - — with optional [EvCon] argument, Close is eventually consistent, ie.
//     Close may return prior to the channel actually being closed,
//     for higher performance
//   - [Awaitable.IsClosed] returns whether the awaitable is triggered, ie. whether the channel is closed
//   - initialization-free, one-to-many wait mechanic, synchronizes-before, observable
//   - use of channel as mechanic allows consumers to await multiple events
//   - Awaitable costs a lazy channel and pointer allocation
//   - note: [parl.CyclicAwaitable] is a re-armable, cyclic version
//   - —
//   - alternative low-blocking inter-thread mechanics are [sync.WaitGroup] and [sync.RWMutex]
//   - — neither is observable and the consumer cannot await other events in parallel
//   - — RWMutex cyclic use has inversion-of-control issues
//   - — WaitGroup lacks control over waiting threads, requiring cyclic use to
//     employ a re-created pointer and value
//   - — both are less performant for the managing thread
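//   - usage, a minimal sketch assuming hypothetical caller code:
//
//	var a parl.Awaitable
//	go func() {
//		// … do work, then trigger all waiters
//		a.Close()
//	}()
//	<-a.Ch() // awaits the event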
type Awaitable struct {
	// closeWinner selects the thread to close the channel
	closeWinner atomic.Bool
	// channel managed by atomicCh
	//	- lazy initialization
	chanp atomic.Pointer[chan struct{}]
	// isClosed indicates whether the channel is closed at atomic performance
	//	- set to true after channel close complete
	//	- shields channel close detection
	isClosed atomic.Bool
}

// Ch returns an awaitable channel. Thread-safe
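//   - the returned channel can be combined with other channels in a select,
//     allowing a consumer to await multiple events. A minimal sketch,
//     assuming hypothetical caller code with a context ctx:
//
//	select {
//	case <-a.Ch():
//	case <-ctx.Done():
//	}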
func (a *Awaitable) Ch() (ch AwaitableCh) { return a.atomicCh() }

// IsClosed returns whether the awaitable has been triggered
//   - on true return, it is guaranteed that the channel has been closed
//   - Thread-safe
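//   - a minimal observability sketch, assuming hypothetical caller code:
//
//	if a.IsClosed() {
//		// the event has already been triggered
//	}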
func (a *Awaitable) IsClosed() (isClosed bool) {

	// read close state with atomic performance
	//	- reading atomic is 0.4955 ns
	if isClosed = a.isClosed.Load(); isClosed {
		return
	}

	// get exact close state from the channel
	//	- determining channel close is 3.479 ns
	select {
	case <-a.atomicCh():
		isClosed = true
	default:
	}

	return
}

// EvCon is an [Awaitable.Close] argument meaning eventual consistency
//   - Close may return before the channel is actually closed
const EvCon = true

// Close triggers the awaitable by closing the channel
//   - upon return, the channel is guaranteed to be closed
//   - eventuallyConsistent [EvCon]: may return before the channel is actually closed
//     for higher performance
//   - idempotent, deferrable, panic-free, thread-safe
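//   - a minimal sketch of both forms, assuming hypothetical caller code:
//
//	a.Close()          // returns after the channel is closed
//	a.Close(parl.EvCon) // may return before the channel is closed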
func (a *Awaitable) Close(eventuallyConsistent ...bool) (didClose bool) {

	// select close winner
	//	- CAS fail is 21.195 ns, CAS success is 8.419 ns
	//	- atomic read: 0.4955 ns
	if didClose = !( //
	// this thread does not close if:
	//	- a winner was already selected, atomic Load performance or
	a.closeWinner.Load() ||
		// this thread is not the winner
		!a.closeWinner.CompareAndSwap(false, true)); //
	!didClose {

		// eventually consistent case does not wait
		//	- this makes eventually consistent Close a blazing 8.655 ns parallel!
		if len(eventuallyConsistent) > 0 && eventuallyConsistent[0] {
			return // eventually consistent: another thread is closing the channel
		}

		// prevent returning before channel close
		//	- closing thread: successful CAS and channel close is 17 ns
		//	- losing thread: failing CAS is 21 ns
		//	- the channel is likely already closed
		if a.isClosed.Load() {
			return
		}

		// single-thread: ≈2 ns
		//	- unshielded parallel contention makes channel read an extremely slow 916 ns
		//	- shielded parallel: 66% is spent in channel read
		<-a.atomicCh()
		return // close completed by other thread
	}
	// only the winner thread arrives here

	// channel close
	//	- ≈9 ns
	close(a.atomicCh())
	// on close complete, store atomic performance flag
	a.isClosed.Store(true)

	return // didClose return
}
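
// a minimal sketch of concurrent Close, assuming hypothetical caller code:
// exactly one invocation returns didClose true, and any invocation made
// without [EvCon] returns only after the channel is closed
//
//	var a parl.Awaitable
//	for i := 0; i < 3; i++ {
//		go a.Close()
//	}
//	<-a.Ch() // released once the winning Close has closed the channel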

// atomicCh returns a non-nil channel using atomic mechanic
func (a *Awaitable) atomicCh() (ch chan struct{}) {

	// get channel previously created by another thread
	//	- 1-pointer Load 0.5167 ns
	if cp := a.chanp.Load(); cp != nil {
		return *cp // channel from atomic pointer
	}

	// attempt to create the authoritative channel
	//	- make of channel is 21.10 ns, 31.13 ns parallel
	//	- CAS fail is 21.195 ns, CAS success is 8.419 ns
	if ch2 := make(chan struct{}); a.chanp.CompareAndSwap(nil, &ch2) {
		return ch2 // channel written to atomic pointer
	}

	// get channel created by other thread
	return *a.chanp.Load()
}
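
// the lazy initialization in atomicCh follows a general pattern with
// [atomic.Pointer]: a Load fast path, CompareAndSwap to elect one value,
// then Load again on CAS failure. A minimal generic sketch of that pattern,
// using a hypothetical helper that is not part of this package:
//
//	func lazyInit[T any](p *atomic.Pointer[T], newValue func() *T) *T {
//		if tp := p.Load(); tp != nil {
//			return tp // value previously created by some thread
//		}
//		if tp := newValue(); p.CompareAndSwap(nil, tp) {
//			return tp // this thread's value won the election
//		}
//		return p.Load() // another thread's value won
//	}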