github.com/m3db/m3@v1.5.0/src/x/sync/worker_pool.go

// Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

// Package sync implements synchronization facilities such as worker pools.
package sync

import (
	"time"

	"github.com/m3db/m3/src/dbnode/tracepoint"
	"github.com/m3db/m3/src/x/context"
)

type workerPool struct {
	// workCh holds one token per worker: callers take a token before
	// running work and return it when the work completes.
	workCh chan struct{}
}

// NewWorkerPool creates a new worker pool.
func NewWorkerPool(size int) WorkerPool {
	return &workerPool{workCh: make(chan struct{}, size)}
}

func (p *workerPool) Init() {
	for i := 0; i < cap(p.workCh); i++ {
		p.workCh <- struct{}{}
	}
}

func (p *workerPool) Go(work Work) {
	p.GoInstrument(work)
}

func (p *workerPool) GoInstrument(work Work) ScheduleResult {
	start := time.Now()
	token := <-p.workCh
	wait := time.Since(start)
	go func() {
		work()
		p.workCh <- token
	}()
	return ScheduleResult{
		Available: true,
		WaitTime:  wait,
	}
}

func (p *workerPool) GoIfAvailable(work Work) bool {
	select {
	case token := <-p.workCh:
		go func() {
			work()
			p.workCh <- token
		}()
		return true
	default:
		return false
	}
}

func (p *workerPool) GoWithTimeout(work Work, timeout time.Duration) bool {
	return p.GoWithTimeoutInstrument(work, timeout).Available
}

func (p *workerPool) GoWithTimeoutInstrument(work Work, timeout time.Duration) ScheduleResult {
	// Attempt to acquire a token without allocating a ticker.
	select {
	case token := <-p.workCh:
		go func() {
			work()
			p.workCh <- token
		}()
		return ScheduleResult{Available: true}
	default:
	}

	// Now allocate a ticker and wait for either a token or the timeout.
	ticker := time.NewTicker(timeout)
	defer ticker.Stop()

	start := time.Now()
	select {
	case token := <-p.workCh:
		wait := time.Since(start)
		go func() {
			work()
			p.workCh <- token
		}()
		return ScheduleResult{Available: true, WaitTime: wait}
	case <-ticker.C:
		return ScheduleResult{Available: false, WaitTime: timeout}
	}
}

func (p *workerPool) GoWithContext(ctx context.Context, work Work) ScheduleResult {
	stdctx := ctx.GoContext()
	// Don't give out a token if the ctx has already been canceled.
	select {
	case <-stdctx.Done():
		return ScheduleResult{Available: false, WaitTime: 0}
	default:
	}

	start := time.Now()
	_, sp := ctx.StartTraceSpan(tracepoint.WorkerPoolWait)

	select {
	case token := <-p.workCh:
		sp.Finish()
		wait := time.Since(start)
		go func() {
			work()
			p.workCh <- token
		}()
		return ScheduleResult{Available: true, WaitTime: wait}
	case <-stdctx.Done():
		sp.Finish()
		return ScheduleResult{Available: false, WaitTime: time.Since(start)}
	}
}

func (p *workerPool) FastContextCheck(batchSize int) WorkerPool {
	return &fastWorkerPool{workerPool: p, batchSize: batchSize}
}

func (p *workerPool) Size() int {
	return cap(p.workCh)
}
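
// The following is an illustrative usage sketch, not part of the original
// file. It assumes Work is a plain func() (as defined elsewhere in this
// package) and uses only the identifiers declared above; the function name
// exampleWorkerPoolUsage is hypothetical.
func exampleWorkerPoolUsage() {
	pool := NewWorkerPool(4) // at most 4 tasks run concurrently
	pool.Init()              // seed the pool with one token per worker

	// Go blocks until a token is free, then runs the work on its own goroutine.
	results := make([]int, 10)
	done := make(chan struct{}, len(results))
	for i := 0; i < len(results); i++ {
		i := i // capture the loop variable for the closure
		pool.Go(func() {
			results[i] = i * i
			done <- struct{}{}
		})
	}
	for range results {
		<-done
	}

	// GoIfAvailable never blocks; it reports false when every worker is busy.
	_ = pool.GoIfAvailable(func() {})

	// GoWithTimeout waits up to the given duration for a free worker.
	_ = pool.GoWithTimeout(func() {}, 10*time.Millisecond)
}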