package f

import (
	"sync"
	"sync/atomic"
	"time"
)

// PoolWorker is an interface representing a working agent. Implementations
// are spawned and managed by a goroutine pool, allowing you to limit work
// coming from any number of goroutines with a synchronous API.
type PoolWorker interface {
	// Process will synchronously perform a job and return the result.
	Process(interface{}) interface{}

	// BlockUntilReady is called before each job is processed and must block the
	// calling goroutine until the Worker is ready to process the next job.
	BlockUntilReady()

	// Interrupt is called when a job is cancelled. The worker is responsible
	// for unblocking the Process implementation.
	Interrupt()

	// Terminate is called when a Worker is removed from the processing pool
	// and is responsible for cleaning up any held resources.
	Terminate()
}

// closurePoolWorker is a minimal Worker implementation that simply wraps a
// func(interface{}) interface{}.
type closurePoolWorker struct {
	processor func(interface{}) interface{}
}

// Process delegates the job payload to the wrapped closure.
func (w *closurePoolWorker) Process(payload interface{}) interface{} {
	return w.processor(payload)
}

// BlockUntilReady is a no-op: a closure worker is always ready.
func (w *closurePoolWorker) BlockUntilReady() {}

// Interrupt is a no-op: a closure worker cannot be unblocked early.
func (w *closurePoolWorker) Interrupt() {}

// Terminate is a no-op: a closure worker holds no resources.
func (w *closurePoolWorker) Terminate() {}

// callbackPoolWorker is a minimal Worker implementation that attempts to cast
// each job into func() and either calls it if successful or returns
// ErrJobNotFunc.
46 type callbackPoolWorker struct{} 47 48 func (w *callbackPoolWorker) Process(payload interface{}) interface{} { 49 f, ok := payload.(func()) 50 if !ok { 51 return ErrJobNotFunc 52 } 53 f() 54 return nil 55 } 56 57 func (w *callbackPoolWorker) BlockUntilReady() {} 58 func (w *callbackPoolWorker) Interrupt() {} 59 func (w *callbackPoolWorker) Terminate() {} 60 61 // Pool is a struct that manages a collection of workers, each with their own 62 // goroutine. The Pool can initialize, expand, compress and close the workers, 63 // as well as processing jobs with the workers synchronously. 64 type Pool struct { 65 queuedJobs int64 66 67 ctor func() PoolWorker 68 workers []*poolWorkerWrapper 69 reqChan chan poolWorkRequest 70 71 workerMut sync.Mutex 72 } 73 74 // New creates a new Pool of workers that starts with n workers. You must 75 // provide a constructor function that creates new Worker types and when you 76 // change the size of the pool the constructor will be called to create each new 77 // Worker. 78 func NewPool(n int, ctor func() PoolWorker) *Pool { 79 p := &Pool{ 80 ctor: ctor, 81 reqChan: make(chan poolWorkRequest), 82 } 83 p.SetSize(n) 84 85 return p 86 } 87 88 // NewFunc creates a new Pool of a worker. 89 func NewFunc(f func(interface{}) interface{}) *Pool { 90 return NewPoolFunc(1, f) 91 } 92 93 // NewPoolsFunc creates a new Pool of workers where each worker. 94 func NewPoolsFunc(f func(interface{}) interface{}) *Pool { 95 return NewPoolFunc(NumCPUx16, f) 96 } 97 98 // NewPoolFunc creates a new Pool of workers where each worker will process using 99 // the provided func. 100 func NewPoolFunc(n int, f func(interface{}) interface{}) *Pool { 101 return NewPool(n, func() PoolWorker { 102 return &closurePoolWorker{ 103 processor: f, 104 } 105 }) 106 } 107 108 // NewPoolsCallback creates a new Pool of workers. 
109 func NewPoolsCallback() *Pool { 110 return NewPoolCallback(NumCPUx16) 111 } 112 113 // NewPoolCallback creates a new Pool of workers where workers cast the job payload 114 // into a func() and runs it, or returns ErrNotFunc if the cast failed. 115 func NewPoolCallback(n int) *Pool { 116 return NewPool(n, func() PoolWorker { 117 return &callbackPoolWorker{} 118 }) 119 } 120 121 // Process will use the Pool to process a payload and synchronously return the 122 // result. Process can be called safely by any goroutines, but will panic if the 123 // Pool has been stopped. 124 func (p *Pool) Process(payload interface{}) interface{} { 125 atomic.AddInt64(&p.queuedJobs, 1) 126 127 request, open := <-p.reqChan 128 if !open { 129 panic(ErrPoolNotRunning) 130 } 131 132 request.jobChan <- payload 133 134 payload, open = <-request.retChan 135 if !open { 136 panic(ErrWorkerClosed) 137 } 138 139 atomic.AddInt64(&p.queuedJobs, -1) 140 return payload 141 } 142 143 // ProcessTimed will use the Pool to process a payload and synchronously return 144 // the result. If the timeout occurs before the job has finished the worker will 145 // be interrupted and ErrJobTimedOut will be returned. ProcessTimed can be 146 // called safely by any goroutines. 
147 func (p *Pool) ProcessTimed(payload interface{}, timeout time.Duration) (interface{}, error) { 148 atomic.AddInt64(&p.queuedJobs, 1) 149 defer atomic.AddInt64(&p.queuedJobs, -1) 150 151 tout := time.NewTimer(timeout) 152 153 var request poolWorkRequest 154 var open bool 155 156 select { 157 case request, open = <-p.reqChan: 158 if !open { 159 return nil, ErrPoolNotRunning 160 } 161 case <-tout.C: 162 return nil, ErrJobTimedOut 163 } 164 165 select { 166 case request.jobChan <- payload: 167 case <-tout.C: 168 request.interruptFunc() 169 return nil, ErrJobTimedOut 170 } 171 172 select { 173 case payload, open = <-request.retChan: 174 if !open { 175 return nil, ErrWorkerClosed 176 } 177 case <-tout.C: 178 request.interruptFunc() 179 return nil, ErrJobTimedOut 180 } 181 182 tout.Stop() 183 return payload, nil 184 } 185 186 // QueueLength returns the current count of pending queued jobs. 187 func (p *Pool) QueueLength() int64 { 188 return atomic.LoadInt64(&p.queuedJobs) 189 } 190 191 // SetSize changes the total number of workers in the Pool. This can be called 192 // by any goroutine at any time unless the Pool has been stopped, in which case 193 // a panic will occur. 194 func (p *Pool) SetSize(n int) { 195 p.workerMut.Lock() 196 defer p.workerMut.Unlock() 197 198 lWorkers := len(p.workers) 199 if lWorkers == n { 200 return 201 } 202 203 // Add extra workers if N > len(workers) 204 for i := lWorkers; i < n; i++ { 205 p.workers = append(p.workers, newPoolWorkerWrapper(p.reqChan, p.ctor())) 206 } 207 208 // Asynchronously stop all workers > N 209 for i := n; i < lWorkers; i++ { 210 p.workers[i].stop() 211 } 212 213 // Synchronously wait for all workers > N to stop 214 for i := n; i < lWorkers; i++ { 215 p.workers[i].join() 216 } 217 218 // Remove stopped workers from slice 219 p.workers = p.workers[:n] 220 } 221 222 // GetSize returns the current size of the pool. 
223 func (p *Pool) GetSize() int { 224 p.workerMut.Lock() 225 defer p.workerMut.Unlock() 226 227 return len(p.workers) 228 } 229 230 // Close will terminate all workers and close the job channel of this Pool. 231 func (p *Pool) Close() { 232 p.SetSize(0) 233 close(p.reqChan) 234 } 235 236 // poolWorkRequest is a struct containing context representing a workers intention 237 // to receive a work payload. 238 type poolWorkRequest struct { 239 // jobChan is used to send the payload to this worker. 240 jobChan chan<- interface{} 241 242 // retChan is used to read the result from this worker. 243 retChan <-chan interface{} 244 245 // interruptFunc can be called to cancel a running job. When called it is no 246 // longer necessary to read from retChan. 247 interruptFunc func() 248 } 249 250 // poolWorkerWrapper takes a Worker implementation and wraps it within a goroutine 251 // and channel arrangement. The poolWorkerWrapper is responsible for managing the 252 // lifetime of both the Worker and the goroutine. 253 type poolWorkerWrapper struct { 254 worker PoolWorker 255 interruptChan chan struct{} 256 257 // reqChan is NOT owned by this type, it is used to send requests for work. 258 reqChan chan<- poolWorkRequest 259 260 // closeChan can be closed in order to cleanly shutdown this worker. 261 closeChan chan struct{} 262 263 // closedChan is closed by the run() goroutine when it exits. 
264 closedChan chan struct{} 265 } 266 267 func newPoolWorkerWrapper(reqChan chan<- poolWorkRequest, worker PoolWorker) *poolWorkerWrapper { 268 w := poolWorkerWrapper{ 269 worker: worker, 270 interruptChan: make(chan struct{}), 271 reqChan: reqChan, 272 closeChan: make(chan struct{}), 273 closedChan: make(chan struct{}), 274 } 275 276 go w.run() 277 278 return &w 279 } 280 281 func (w *poolWorkerWrapper) interrupt() { 282 close(w.interruptChan) 283 w.worker.Interrupt() 284 } 285 286 func (w *poolWorkerWrapper) run() { 287 jobChan, retChan := make(chan interface{}), make(chan interface{}) 288 defer func() { 289 w.worker.Terminate() 290 close(retChan) 291 close(w.closedChan) 292 }() 293 294 for { 295 // NOTE: Blocking here will prevent the worker from closing down. 296 w.worker.BlockUntilReady() 297 select { 298 case w.reqChan <- poolWorkRequest{ 299 jobChan: jobChan, 300 retChan: retChan, 301 interruptFunc: w.interrupt, 302 }: 303 select { 304 case payload := <-jobChan: 305 result := w.worker.Process(payload) 306 select { 307 case retChan <- result: 308 case <-w.interruptChan: 309 w.interruptChan = make(chan struct{}) 310 } 311 case _, _ = <-w.interruptChan: 312 w.interruptChan = make(chan struct{}) 313 } 314 case <-w.closeChan: 315 return 316 } 317 } 318 } 319 320 func (w *poolWorkerWrapper) stop() { 321 close(w.closeChan) 322 } 323 324 func (w *poolWorkerWrapper) join() { 325 <-w.closedChan 326 }