github.com/huaweicloud/golangsdk@v0.0.0-20210831081626-d823fe11ceba/openstack/obs/pool.go

// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

//nolint:structcheck, unused
package obs

import (
	"errors"
	"fmt"
	"runtime"
	"sync"
	"sync/atomic"
	"time"
)

// Future defines interface with function: Get
type Future interface {
	Get() interface{}
}

// FutureResult for task result
type FutureResult struct {
	result     interface{}
	resultChan chan interface{}
	lock       sync.Mutex
}

type panicResult struct {
	presult interface{}
}

func (f *FutureResult) checkPanic() interface{} {
	if r, ok := f.result.(panicResult); ok {
		panic(r.presult)
	}
	return f.result
}

// Get gets the task result
func (f *FutureResult) Get() interface{} {
	if f.resultChan == nil {
		return f.checkPanic()
	}
	f.lock.Lock()
	defer f.lock.Unlock()
	if f.resultChan == nil {
		return f.checkPanic()
	}

	f.result = <-f.resultChan
	close(f.resultChan)
	f.resultChan = nil
	return f.checkPanic()
}

// Task defines interface with function: Run
type Task interface {
	Run() interface{}
}

type funcWrapper struct {
	f func() interface{}
}

func (fw *funcWrapper) Run() interface{} {
	if fw.f != nil {
		return fw.f()
	}
	return nil
}

type taskWrapper struct {
	t Task
	f *FutureResult
}

func (tw *taskWrapper) Run() interface{} {
	if tw.t != nil {
		return tw.t.Run()
	}
	return nil
}

type signalTask struct {
	id string
}

func (signalTask) Run() interface{} {
	return nil
}

type worker struct {
	name      string
	taskQueue chan Task
	wg        *sync.WaitGroup
	pool      *RoutinePool
}

func runTask(t Task) {
	if tw, ok := t.(*taskWrapper); ok {
		defer func() {
			if r := recover(); r != nil {
				tw.f.resultChan <- panicResult{
					presult: r,
				}
			}
		}()
		ret := t.Run()
		tw.f.resultChan <- ret
	} else {
		t.Run()
	}
}

func (*worker) runTask(t Task) {
	runTask(t)
}

func (w *worker) start() {
	go func() {
		defer func() {
			if w.wg != nil {
				w.wg.Done()
			}
		}()
		for {
			task, ok := <-w.taskQueue
			if !ok {
				break
			}
			w.pool.AddCurrentWorkingCnt(1)
			w.runTask(task)
			w.pool.AddCurrentWorkingCnt(-1)
			if w.pool.autoTuneWorker(w) {
				break
			}
		}
	}()
}

func (w *worker) release() {
	w.taskQueue = nil
	w.wg = nil
	w.pool = nil
}

// Pool defines coroutine pool interface
type Pool interface {
	ShutDown()
	Submit(t Task) (Future, error)
	SubmitFunc(f func() interface{}) (Future, error)
	Execute(t Task)
	ExecuteFunc(f func() interface{})
	GetMaxWorkerCnt() int64
	AddMaxWorkerCnt(value int64) int64
	GetCurrentWorkingCnt() int64
	AddCurrentWorkingCnt(value int64) int64
	GetWorkerCnt() int64
	AddWorkerCnt(value int64) int64
	EnableAutoTune()
}
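
// Illustrative sketch (not part of the original file): a minimal custom Task.
// squareTask and runSquareTask are hypothetical names used only for
// demonstration; any type with a Run() interface{} method can be handed to
// Pool.Execute or Pool.Submit.
type squareTask struct {
	n int
}

// Run satisfies the Task interface; the returned value surfaces through Future.Get.
func (st squareTask) Run() interface{} {
	return st.n * st.n
}

// runSquareTask submits a squareTask to any Pool implementation and waits for its result.
func runSquareTask(pool Pool, n int) (interface{}, error) {
	future, err := pool.Submit(squareTask{n: n})
	if err != nil {
		return nil, err
	}
	return future.Get(), nil
}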

type basicPool struct {
	maxWorkerCnt      int64
	workerCnt         int64
	currentWorkingCnt int64
	isShutDown        int32
}

// ErrTaskInvalid will be returned if the task is nil
var ErrTaskInvalid = errors.New("Task is nil")

func (pool *basicPool) GetCurrentWorkingCnt() int64 {
	return atomic.LoadInt64(&pool.currentWorkingCnt)
}

func (pool *basicPool) AddCurrentWorkingCnt(value int64) int64 {
	return atomic.AddInt64(&pool.currentWorkingCnt, value)
}

func (pool *basicPool) GetWorkerCnt() int64 {
	return atomic.LoadInt64(&pool.workerCnt)
}

func (pool *basicPool) AddWorkerCnt(value int64) int64 {
	return atomic.AddInt64(&pool.workerCnt, value)
}

func (pool *basicPool) GetMaxWorkerCnt() int64 {
	return atomic.LoadInt64(&pool.maxWorkerCnt)
}

func (pool *basicPool) AddMaxWorkerCnt(value int64) int64 {
	return atomic.AddInt64(&pool.maxWorkerCnt, value)
}

func (pool *basicPool) CompareAndSwapCurrentWorkingCnt(oldValue, newValue int64) bool {
	return atomic.CompareAndSwapInt64(&pool.currentWorkingCnt, oldValue, newValue)
}

func (pool *basicPool) EnableAutoTune() {

}

// RoutinePool defines the coroutine pool struct
type RoutinePool struct {
	basicPool
	taskQueue     chan Task
	dispatchQueue chan Task
	workers       map[string]*worker
	cacheCnt      int
	wg            *sync.WaitGroup
	lock          *sync.Mutex
	shutDownWg    *sync.WaitGroup
	autoTune      int32
}

// ErrSubmitTimeout will be returned if submitting a task times out when calling the SubmitWithTimeout function
var ErrSubmitTimeout = errors.New("Submit task timeout")

// ErrPoolShutDown will be returned if the RoutinePool is shut down
var ErrPoolShutDown = errors.New("RoutinePool is shutdown")

// ErrTaskReject will be returned if the submitted task is rejected
var ErrTaskReject = errors.New("Submit task is rejected")

var closeQueue = signalTask{id: "closeQueue"}

// NewRoutinePool creates a RoutinePool instance
func NewRoutinePool(maxWorkerCnt, cacheCnt int) Pool {
	if maxWorkerCnt <= 0 {
		maxWorkerCnt = runtime.NumCPU()
	}

	pool := &RoutinePool{
		cacheCnt:   cacheCnt,
		wg:         new(sync.WaitGroup),
		lock:       new(sync.Mutex),
		shutDownWg: new(sync.WaitGroup),
		autoTune:   0,
	}
	pool.isShutDown = 0
	pool.maxWorkerCnt += int64(maxWorkerCnt)
	if pool.cacheCnt <= 0 {
		pool.taskQueue = make(chan Task)
	} else {
		pool.taskQueue = make(chan Task, pool.cacheCnt)
	}
	pool.workers = make(map[string]*worker, pool.maxWorkerCnt)
	// dispatchQueue must be unbuffered
	pool.dispatchQueue = make(chan Task)
	pool.dispatcher()

	return pool
}

// EnableAutoTune sets the autoTune enabled
func (pool *RoutinePool) EnableAutoTune() {
	atomic.StoreInt32(&pool.autoTune, 1)
}

func (pool *RoutinePool) checkStatus(t Task) error {
	if t == nil {
		return ErrTaskInvalid
	}

	if atomic.LoadInt32(&pool.isShutDown) == 1 {
		return ErrPoolShutDown
	}
	return nil
}

func (pool *RoutinePool) dispatcher() {
	pool.shutDownWg.Add(1)
	go func() {
		for {
			task, ok := <-pool.dispatchQueue
			if !ok {
				break
			}

			if task == closeQueue {
				close(pool.taskQueue)
				pool.shutDownWg.Done()
				continue
			}

			if pool.GetWorkerCnt() < pool.GetMaxWorkerCnt() {
				pool.addWorker()
			}

			pool.taskQueue <- task
		}
	}()
}
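
// Illustrative sketch (not part of the original file): how a caller might drive
// a RoutinePool end to end. The function name and the workload are assumptions
// for demonstration only.
func exampleRoutinePoolUsage() {
	// Up to 4 workers, with a task queue buffer of 16.
	pool := NewRoutinePool(4, 16)
	defer pool.ShutDown()

	// Submit returns a Future; Get blocks until the task has run.
	future, err := pool.SubmitFunc(func() interface{} {
		return 21 * 2
	})
	if err != nil {
		fmt.Println("submit failed:", err)
		return
	}
	fmt.Println("result:", future.Get())
}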

// AddMaxWorkerCnt sets the maxWorkerCnt field's value and returns it
func (pool *RoutinePool) AddMaxWorkerCnt(value int64) int64 {
	if atomic.LoadInt32(&pool.autoTune) == 1 {
		return pool.basicPool.AddMaxWorkerCnt(value)
	}
	return pool.GetMaxWorkerCnt()
}

func (pool *RoutinePool) addWorker() {
	if atomic.LoadInt32(&pool.autoTune) == 1 {
		pool.lock.Lock()
		defer pool.lock.Unlock()
	}
	w := &worker{}
	w.name = fmt.Sprintf("worker-%d", len(pool.workers))
	w.taskQueue = pool.taskQueue
	w.wg = pool.wg
	pool.AddWorkerCnt(1)
	w.pool = pool
	pool.workers[w.name] = w
	pool.wg.Add(1)
	w.start()
}

func (pool *RoutinePool) autoTuneWorker(w *worker) bool {
	if atomic.LoadInt32(&pool.autoTune) == 0 {
		return false
	}

	if w == nil {
		return false
	}

	workerCnt := pool.GetWorkerCnt()
	maxWorkerCnt := pool.GetMaxWorkerCnt()
	if workerCnt > maxWorkerCnt && atomic.CompareAndSwapInt64(&pool.workerCnt, workerCnt, workerCnt-1) {
		pool.lock.Lock()
		defer pool.lock.Unlock()
		delete(pool.workers, w.name)
		w.wg.Done()
		w.release()
		return true
	}

	return false
}

// ExecuteFunc creates a funcWrapper instance with the specified function and calls the Execute function
func (pool *RoutinePool) ExecuteFunc(f func() interface{}) {
	fw := &funcWrapper{
		f: f,
	}
	pool.Execute(fw)
}

// Execute pushes the specified task to the dispatchQueue
func (pool *RoutinePool) Execute(t Task) {
	if t != nil {
		pool.dispatchQueue <- t
	}
}

// SubmitFunc creates a funcWrapper instance with the specified function and calls the Submit function
func (pool *RoutinePool) SubmitFunc(f func() interface{}) (Future, error) {
	fw := &funcWrapper{
		f: f,
	}
	return pool.Submit(fw)
}

// Submit pushes the specified task to the dispatchQueue, and returns the FutureResult and error info
func (pool *RoutinePool) Submit(t Task) (Future, error) {
	if err := pool.checkStatus(t); err != nil {
		return nil, err
	}
	f := &FutureResult{}
	f.resultChan = make(chan interface{}, 1)
	tw := &taskWrapper{
		t: t,
		f: f,
	}
	pool.dispatchQueue <- tw
	return f, nil
}

// SubmitWithTimeout pushes the specified task to the dispatchQueue, and returns the FutureResult and error info.
// It also takes a timeout value in milliseconds; ErrSubmitTimeout is returned if the task cannot be dispatched
// within that time.
func (pool *RoutinePool) SubmitWithTimeout(t Task, timeout int64) (Future, error) {
	if timeout <= 0 {
		return pool.Submit(t)
	}
	if err := pool.checkStatus(t); err != nil {
		return nil, err
	}
	timeoutChan := make(chan bool, 1)
	go func() {
		time.Sleep(time.Millisecond * time.Duration(timeout))
		timeoutChan <- true
		close(timeoutChan)
	}()

	f := &FutureResult{}
	f.resultChan = make(chan interface{}, 1)
	tw := &taskWrapper{
		t: t,
		f: f,
	}
	select {
	case pool.dispatchQueue <- tw:
		return f, nil
	case <-timeoutChan:
		return nil, ErrSubmitTimeout
	}
}
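
// Illustrative sketch (not part of the original file): using SubmitWithTimeout.
// The timeout argument is interpreted as milliseconds, matching the
// time.Millisecond conversion above. Names below are assumptions for
// demonstration only.
func exampleSubmitWithTimeout(pool *RoutinePool) {
	task := &funcWrapper{f: func() interface{} {
		time.Sleep(100 * time.Millisecond) // simulated work
		return "done"
	}}

	// Allow the dispatcher up to 500 ms to accept the task; otherwise
	// ErrSubmitTimeout is returned and the task never runs.
	future, err := pool.SubmitWithTimeout(task, 500)
	if err == ErrSubmitTimeout {
		fmt.Println("dispatch queue was busy for too long")
		return
	}
	if err != nil {
		fmt.Println("submit failed:", err)
		return
	}
	fmt.Println(future.Get())
}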

func (pool *RoutinePool) beforeCloseDispatchQueue() {
	if !atomic.CompareAndSwapInt32(&pool.isShutDown, 0, 1) {
		return
	}
	pool.dispatchQueue <- closeQueue
	pool.wg.Wait()
}

func (pool *RoutinePool) doCloseDispatchQueue() {
	close(pool.dispatchQueue)
	pool.shutDownWg.Wait()
}

// ShutDown closes the RoutinePool instance
func (pool *RoutinePool) ShutDown() {
	pool.beforeCloseDispatchQueue()
	pool.doCloseDispatchQueue()
	for _, w := range pool.workers {
		w.release()
	}
	pool.workers = nil
	pool.taskQueue = nil
	pool.dispatchQueue = nil
}

// NoChanPool defines the coroutine pool struct
type NoChanPool struct {
	basicPool
	wg     *sync.WaitGroup
	tokens chan interface{}
}

// NewNochanPool creates a new NoChanPool instance
func NewNochanPool(maxWorkerCnt int) Pool {
	if maxWorkerCnt <= 0 {
		maxWorkerCnt = runtime.NumCPU()
	}

	pool := &NoChanPool{
		wg:     new(sync.WaitGroup),
		tokens: make(chan interface{}, maxWorkerCnt),
	}
	pool.isShutDown = 0
	pool.AddMaxWorkerCnt(int64(maxWorkerCnt))

	for i := 0; i < maxWorkerCnt; i++ {
		pool.tokens <- struct{}{}
	}

	return pool
}

func (pool *NoChanPool) acquire() {
	<-pool.tokens
}

func (pool *NoChanPool) release() {
	pool.tokens <- struct{}{}
}

func (pool *NoChanPool) execute(t Task) {
	pool.wg.Add(1)
	go func() {
		pool.acquire()
		defer func() {
			pool.release()
			pool.wg.Done()
		}()
		runTask(t)
	}()
}

// ShutDown closes the NoChanPool instance
func (pool *NoChanPool) ShutDown() {
	if !atomic.CompareAndSwapInt32(&pool.isShutDown, 0, 1) {
		return
	}
	pool.wg.Wait()
}

// Execute executes the specified task
func (pool *NoChanPool) Execute(t Task) {
	if t != nil {
		pool.execute(t)
	}
}

// ExecuteFunc creates a funcWrapper instance with the specified function and calls the Execute function
func (pool *NoChanPool) ExecuteFunc(f func() interface{}) {
	fw := &funcWrapper{
		f: f,
	}
	pool.Execute(fw)
}

// Submit executes the specified task, and returns the FutureResult and error info
func (pool *NoChanPool) Submit(t Task) (Future, error) {
	if t == nil {
		return nil, ErrTaskInvalid
	}

	f := &FutureResult{}
	f.resultChan = make(chan interface{}, 1)
	tw := &taskWrapper{
		t: t,
		f: f,
	}

	pool.execute(tw)
	return f, nil
}

// SubmitFunc creates a funcWrapper instance with the specified function and calls the Submit function
func (pool *NoChanPool) SubmitFunc(f func() interface{}) (Future, error) {
	fw := &funcWrapper{
		f: f,
	}
	return pool.Submit(fw)
}
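
// Illustrative sketch (not part of the original file): the token-based
// NoChanPool caps concurrency with a buffered channel of tokens instead of a
// dispatcher goroutine. Names below are assumptions for demonstration only.
func exampleNoChanPoolUsage() {
	// At most 2 tasks run concurrently.
	pool := NewNochanPool(2)

	futures := make([]Future, 0, 4)
	for i := 0; i < 4; i++ {
		i := i // capture loop variable for the closure
		f, err := pool.SubmitFunc(func() interface{} {
			return i * i
		})
		if err != nil {
			fmt.Println("submit failed:", err)
			continue
		}
		futures = append(futures, f)
	}

	for _, f := range futures {
		fmt.Println(f.Get())
	}

	// ShutDown waits for all submitted tasks to finish.
	pool.ShutDown()
}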