github.com/m3db/m3@v1.5.0/src/x/sync/pooled_worker_pool_test.go

// Copyright (c) 2018 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package sync

import (
	"context"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestPooledWorkerPoolGo(t *testing.T) {
	var count uint32

	p, err := NewPooledWorkerPool(testWorkerPoolSize, NewPooledWorkerPoolOptions())
	require.NoError(t, err)
	p.Init()

	var wg sync.WaitGroup
	for i := 0; i < testWorkerPoolSize*2; i++ {
		wg.Add(1)
		p.Go(func() {
			atomic.AddUint32(&count, 1)
			wg.Done()
		})
	}
	wg.Wait()

	require.Equal(t, uint32(testWorkerPoolSize*2), count)
}

func TestPooledWorkerPoolGoWithContext(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	wp, err := NewPooledWorkerPool(testWorkerPoolSize,
		NewPooledWorkerPoolOptions().SetGrowOnDemand(false))
	require.NoError(t, err)
	wp.Init()

	// Cancel and make sure the worker will prefer to return from canceled
	// work rather than always enqueue.
	cancel()

	var (
		aborted uint32
		wg      sync.WaitGroup
	)
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			result := wp.GoWithContext(ctx, func() {
				time.Sleep(time.Second)
			})
			if !result {
				atomic.AddUint32(&aborted, 1)
			}
		}()
	}
	// Wait for all submissions to return before reading the counter,
	// otherwise the assertion races with the goroutines above.
	wg.Wait()

	n := atomic.LoadUint32(&aborted)
	require.True(t, n > 0)
	t.Logf("aborted: %d", n)
}

func TestPooledWorkerPoolGoWithTimeout(t *testing.T) {
	var (
		workers = 2
		opts    = NewPooledWorkerPoolOptions().SetNumShards(int64(workers))
	)
	p, err := NewPooledWorkerPool(workers, opts)
	require.NoError(t, err)
	p.Init()

	pooledWorkerPool, ok := p.(*pooledWorkerPool)
	require.True(t, ok)

	// First fill up all the queues without blocking.
	var wg sync.WaitGroup
	wg.Add(1)

	// Enqueue workers * 2: each shard's buffered channel holds
	// workers / shards entries (1 here, since 2 / 2 = 1), so every shard
	// accepts one task for its worker plus one buffered task, which means
	// we need to enqueue two times the workers.
	totalEnqueue := workers * 2
	now := time.Now()
	for i := 0; i < totalEnqueue; i++ {
		// Set now in such a way that independent shards are selected.
		shardNowSelect := now.
			Truncate(time.Duration(totalEnqueue) * time.Nanosecond).
			Add(time.Duration(i) * time.Nanosecond)
		pooledWorkerPool.nowFn = func() time.Time {
			return shardNowSelect
		}

		result := p.GoWithTimeout(func() {
			wg.Wait()
		}, 100*time.Millisecond)
		assert.True(t, result)
	}

	// Restore the now fn.
	pooledWorkerPool.nowFn = time.Now

	// Now ensure all further enqueues time out.
	for i := 0; i < workers; i++ {
		result := p.GoWithTimeout(func() {
			wg.Wait()
		}, 100*time.Millisecond)
		assert.False(t, result)
	}

	// Release goroutines.
	wg.Done()
}

func TestPooledWorkerPoolGrowOnDemand(t *testing.T) {
	var count uint32

	p, err := NewPooledWorkerPool(
		1,
		NewPooledWorkerPoolOptions().
			SetGrowOnDemand(true))
	require.NoError(t, err)
	p.Init()

	var (
		wg       sync.WaitGroup
		numIters = testWorkerPoolSize * 2
		doneCh   = make(chan struct{})
	)
	wg.Add(numIters)

	for i := 0; i < numIters; i++ {
		// If Go did not grow the pool by allocating new goroutines, this
		// loop would never complete: the anonymous work function below
		// blocks on doneCh and would block further iterations.
		p.Go(func() {
			atomic.AddUint32(&count, 1)
			wg.Done()
			<-doneCh
		})
	}
	close(doneCh)
	wg.Wait()

	require.Equal(t, uint32(numIters), count)
}

func TestPooledWorkerPoolGoOrGrowKillWorker(t *testing.T) {
	var count uint32

	p, err := NewPooledWorkerPool(
		1,
		NewPooledWorkerPoolOptions().
			SetGrowOnDemand(true).
			SetKillWorkerProbability(1.0))
	require.NoError(t, err)
	p.Init()

	var (
		wg       sync.WaitGroup
		numIters = testWorkerPoolSize * 2
		doneCh   = make(chan struct{})
	)
	wg.Add(numIters)

	for i := 0; i < numIters; i++ {
		// If Go did not grow the pool by allocating new goroutines, this
		// loop would never complete: the anonymous work function below
		// blocks on doneCh and would block further iterations.
		p.Go(func() {
			atomic.AddUint32(&count, 1)
			wg.Done()
			<-doneCh
		})
	}
	close(doneCh)
	wg.Wait()

	require.Equal(t, uint32(numIters), count)
}

func TestPooledWorkerPoolGoKillWorker(t *testing.T) {
	var count uint32

	p, err := NewPooledWorkerPool(
		testWorkerPoolSize,
		NewPooledWorkerPoolOptions().
			SetKillWorkerProbability(1.0))
	require.NoError(t, err)
	p.Init()

	var wg sync.WaitGroup
	for i := 0; i < testWorkerPoolSize*2; i++ {
		wg.Add(1)
		p.Go(func() {
			atomic.AddUint32(&count, 1)
			wg.Done()
		})
	}
	wg.Wait()

	require.Equal(t, uint32(testWorkerPoolSize*2), count)
}

func TestPooledWorkerPoolSizeTooSmall(t *testing.T) {
	_, err := NewPooledWorkerPool(0, NewPooledWorkerPoolOptions())
	require.Error(t, err)
}

func TestPooledWorkerFast(t *testing.T) {
	wp, err := NewPooledWorkerPool(1, NewPooledWorkerPoolOptions())
	require.NoError(t, err)
	wp.Init()

	fast := wp.FastContextCheck(3)

	ctx, cancel := context.WithCancel(context.Background())
	cancel()

	// With a check interval of 3, only every third call observes the
	// canceled context and aborts; the calls in between skip the check
	// and enqueue the work.
	require.False(t, fast.GoWithContext(ctx, func() {}))
	require.True(t, fast.GoWithContext(ctx, func() {}))
	require.True(t, fast.GoWithContext(ctx, func() {}))
	require.False(t, fast.GoWithContext(ctx, func() {}))
	require.True(t, fast.GoWithContext(ctx, func() {}))
}
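Not part of the original file: a minimal usage sketch of the API these tests exercise (NewPooledWorkerPool, Init, Go, GoWithTimeout, GoWithContext), written as if from an importing package. The xsync import alias, the pool size, and the timeouts are illustrative assumptions, not values taken from the repository.

package main

import (
	"context"
	"fmt"
	"time"

	xsync "github.com/m3db/m3/src/x/sync"
)

func main() {
	// Build and initialize a pool; grow-on-demand mirrors the behavior
	// exercised by TestPooledWorkerPoolGrowOnDemand above.
	p, err := xsync.NewPooledWorkerPool(1024,
		xsync.NewPooledWorkerPoolOptions().SetGrowOnDemand(true))
	if err != nil {
		panic(err)
	}
	p.Init()

	// Fire-and-forget submission.
	p.Go(func() { fmt.Println("ran on a pooled worker") })

	// Bounded submission: give up if no worker accepts the work in time.
	if ok := p.GoWithTimeout(func() {}, 10*time.Millisecond); !ok {
		fmt.Println("timed out waiting for a worker")
	}

	// Context-aware submission: abandons the enqueue once ctx is done.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()
	if ok := p.GoWithContext(ctx, func() {}); !ok {
		fmt.Println("context done before a worker accepted the work")
	}

	time.Sleep(100 * time.Millisecond) // let background work finish in this demo
}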