github.com/haraldrudell/parl@v0.4.176/nb-chan-get.go

/*
© 2023–present Harald Rudell <harald.rudell@gmail.com> (https://haraldrudell.github.io/haraldrudell/)
ISC License
*/

package parl

import "math"

// Get returns a slice of at most elementCount items, or all available items
// when elementCount is absent, zero or negative
//   - if the channel is empty, zero items are returned
//   - Get is non-blocking
//   - elementCount > 0: at most this many items
//   - elementCount == 0, negative or absent: all items
//   - Get is panic-free non-blocking error-free thread-safe
func (n *NBChan[T]) Get(elementCount ...int) (allItems []T) {

	// empty NBChan: noop return
	if n.unsentCount.Load() == 0 {
		return // no items available return: nil slice
	}

	// no Get after CloseNow
	if n.isCloseNow.IsInvoked() {
		return
	}

	// notify of pending Get
	n.preGet()

	// arguments
	//   - soughtItemCount: 0 for isAllItems, >0 for that many items
	var soughtItemCount int
	if len(elementCount) > 0 {
		if soughtItemCount = elementCount[0]; soughtItemCount < 0 {
			soughtItemCount = 0
		}
	}
	// Get request seeks all available items
	var isAllItems = soughtItemCount == 0

	if isAllItems {
		if n := n.unsentCount.Load(); n > 0 {
			allItems = make([]T, 0, n) // approximate size
		}
	}

	n.outputLock.Lock()
	defer n.postGet()

	// get possible item from send thread
	//   - thread decrements unsent count
	if item, itemValid := n.tcCollectThreadValue(); itemValid {
		allItems = append(allItems, item)
		if !isAllItems {
			if soughtItemCount--; soughtItemCount == 0 {
				return // fetch complete return
			}
		}
	}

	// fetch from n.outputQueue
	//   - updates unsent count
	allItems = n.fetchFromOutput(&soughtItemCount, isAllItems, allItems)
	if !isAllItems && soughtItemCount == 0 {
		return // fetch complete return
	}

	// fetch from n.inputQueue
	if n.swapQueues() {
		allItems = n.fetchFromOutput(&soughtItemCount, isAllItems, allItems)
	}

	return
}

// preGet registers a pending Get invocation prior to outputLock
//   - increases gets and may hold getsWait
//   - blocks a concurrent always-alert operation
func (n *NBChan[T]) preGet() {
	if n.gets.Add(1) == 1 {
		n.getsWait.HoldWaiters()
		if n.isOnDemandThread.Load() || n.isNoThread.Load() {
			return // not always-thread
		}
		// await that any Send or SendMany always-alert operation has ended
		// and will not be started again before all Get have exited
		n.collectorLock.Lock()
		defer n.collectorLock.Unlock()
	}
}

// postGet is the deferred ending function for [NBChan.Get]
//   - release outputLock
//   - update dataWaitCh
//   - decrease number of Get invocations
//   - if more Get invocations are pending, do nothing
//   - otherwise, release getsWait
//   - check for deferred progress, if so ensure thread progress
func (n *NBChan[T]) postGet() {
	n.outputLock.Unlock()

	// update dataAvailable
	var unsentCount = n.unsentCount.Load()
	n.setDataAvailable(unsentCount > 0)

	// check for last Get: Add(math.MaxUint64) atomically decrements gets
	if n.gets.Add(math.MaxUint64) > 0 {
		return // more Get pending
	}
	n.getsWait.ReleaseWaiters()

	// last ending Get handles progress
	//   - Send or SendMany was invoked and found unsent count zero
	//   - this endangers thread progress because:
	//   - — an on-demand thread may exit
	//   - — an always-thread may enter alert wait
	//   - sends may still be in progress
	//   - sends will not take action while Get is active;
	//     this code runs after the final Get has ended
	//   - the thread is an on-demand or always thread
	//   - a progress-guaranteeing event must be observed
	for {
		if isZeroObserved, isGets := n.tcIsDeferredSend(); !isZeroObserved || isGets {
			// progress not required or
			// additional Get invocations exist
			return
		} else if !n.tcAwaitProgress() {
			// progress was secured
			return
		}
	}
}

// swapQueues swaps n.inputQueue and n.outputQueue0
//   - hasData true means data is now available in n.outputQueue
//   - hasData false means inputQueue was empty and a swap did not take place
//   - n.outputQueue must be empty
//   - invoked while holding [NBChan.outputLock]
//   - [NBChan.inputLock] cannot be held
func (n *NBChan[T]) swapQueues() (hasData bool) {
	n.inputLock.Lock()
	defer n.inputLock.Unlock()

	if hasData = len(n.inputQueue) > 0; !hasData {
		return // no data in input queue return
	}

	// swap the queues
	n.outputQueue = n.inputQueue
	n.outputCapacity.Store(uint64(cap(n.outputQueue)))
	n.inputQueue = n.outputQueue0
	n.inputCapacity.Store(uint64(cap(n.inputQueue)))
	n.outputQueue0 = n.outputQueue[:0]
	return
}

// fetchFromOutput gets items from [NBChan.outputQueue]
//   - [NBChan.outputLock] must be held
//   - decrements unsent count
func (n *NBChan[T]) fetchFromOutput(soughtItemCount *int, isAllItems bool, allItems0 []T) (allItems []T) {
	allItems = allItems0

	// empty queue case: no items
	var itemGetCount = len(n.outputQueue)
	if itemGetCount == 0 {
		return // no available items return
	}
	var zeroValue T
	var soughtIC = *soughtItemCount

	// entire queue case: itemGetCount items
	if isAllItems || itemGetCount <= soughtIC {
		allItems = append(allItems, n.outputQueue...)
		for i := 0; i < itemGetCount; i++ {
			n.outputQueue[i] = zeroValue // clear for garbage collection
		}
		n.outputQueue = n.outputQueue[:0]
		n.unsentCount.Add(uint64(-itemGetCount))
		if !isAllItems {
			*soughtItemCount -= itemGetCount
		}
		return // all queue items return: done
	}

	// first part of queue: *soughtItemCount items
	allItems = append(allItems, n.outputQueue[:soughtIC]...)
	copy(n.outputQueue, n.outputQueue[soughtIC:])
	var endIndex = itemGetCount - soughtIC
	for i := endIndex; i < itemGetCount; i++ {
		n.outputQueue[i] = zeroValue // clear for garbage collection
	}
	n.outputQueue = n.outputQueue[:endIndex]
	n.unsentCount.Add(uint64(-soughtIC))
	*soughtItemCount = 0

	return
}

// ensureOutput allocates n.outputQueue using newQueue(size) if it is nil
//   - acquires [NBChan.outputLock]
func (n *NBChan[T]) ensureOutput(size int) (queue []T) {
	n.outputLock.Lock()
	defer n.outputLock.Unlock()

	if n.outputQueue != nil {
		return
	}
	n.outputQueue = n.newQueue(size)
	return
}
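
// Usage sketch (illustration only, not part of this file): how a consumer
// might drain an NBChan using Get's elementCount argument. NewNBChan and Send
// are assumed from the parl package; their exact signatures are not shown in
// this file, so treat the snippet as a hedged example rather than the
// library's prescribed pattern.
//
//	package main
//
//	import (
//		"fmt"
//
//		"github.com/haraldrudell/parl"
//	)
//
//	func main() {
//		// assumption: NewNBChan returns an initialized *NBChan[int]
//		var n = parl.NewNBChan[int]()
//		// assumption: Send enqueues one item without blocking
//		for i := 0; i < 250; i++ {
//			n.Send(i)
//		}
//
//		// Get(100) returns at most 100 items; Get() would return all items
//		for {
//			var batch = n.Get(100)
//			if len(batch) == 0 {
//				break // NBChan is empty
//			}
//			fmt.Println("received batch of", len(batch))
//		}
//	}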