github.com/twelsh-aw/go/src@v0.0.0-20230516233729-a56fe86a7c81/sync/pool_test.go

// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Pool is a no-op under the race detector, so none of these tests work.
//
//go:build !race

package sync_test

import (
	"runtime"
	"runtime/debug"
	"sort"
	. "sync"
	"sync/atomic"
	"testing"
	"time"
)

func TestPool(t *testing.T) {
	// disable GC so we can control when it happens.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	var p Pool
	if p.Get() != nil {
		t.Fatal("expected empty")
	}

	// Make sure that the goroutine doesn't migrate to another P
	// between Put and Get calls.
	Runtime_procPin()
	p.Put("a")
	p.Put("b")
	if g := p.Get(); g != "a" {
		t.Fatalf("got %#v; want a", g)
	}
	if g := p.Get(); g != "b" {
		t.Fatalf("got %#v; want b", g)
	}
	if g := p.Get(); g != nil {
		t.Fatalf("got %#v; want nil", g)
	}
	Runtime_procUnpin()

	// Put in a large number of objects so they spill into
	// stealable space.
	for i := 0; i < 100; i++ {
		p.Put("c")
	}
	// After one GC, the victim cache should keep them alive.
	runtime.GC()
	if g := p.Get(); g != "c" {
		t.Fatalf("got %#v; want c after GC", g)
	}
	// A second GC should drop the victim cache.
	runtime.GC()
	if g := p.Get(); g != nil {
		t.Fatalf("got %#v; want nil after second GC", g)
	}
}

func TestPoolNew(t *testing.T) {
	// disable GC so we can control when it happens.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))

	i := 0
	p := Pool{
		New: func() any {
			i++
			return i
		},
	}
	if v := p.Get(); v != 1 {
		t.Fatalf("got %v; want 1", v)
	}
	if v := p.Get(); v != 2 {
		t.Fatalf("got %v; want 2", v)
	}

	// Make sure that the goroutine doesn't migrate to another P
	// between Put and Get calls.
	Runtime_procPin()
	p.Put(42)
	if v := p.Get(); v != 42 {
		t.Fatalf("got %v; want 42", v)
	}
	Runtime_procUnpin()

	if v := p.Get(); v != 3 {
		t.Fatalf("got %v; want 3", v)
	}
}

// Test that Pool does not hold pointers to previously cached resources.
func TestPoolGC(t *testing.T) {
	testPool(t, true)
}

// Test that Pool releases resources on GC.
func TestPoolRelease(t *testing.T) {
	testPool(t, false)
}

func testPool(t *testing.T, drain bool) {
	var p Pool
	const N = 100
loop:
	for try := 0; try < 3; try++ {
		if try == 1 && testing.Short() {
			break
		}
		var fin, fin1 uint32
		for i := 0; i < N; i++ {
			v := new(string)
			runtime.SetFinalizer(v, func(vv *string) {
				atomic.AddUint32(&fin, 1)
			})
			p.Put(v)
		}
		if drain {
			for i := 0; i < N; i++ {
				p.Get()
			}
		}
		for i := 0; i < 5; i++ {
			runtime.GC()
			time.Sleep(time.Duration(i*100+10) * time.Millisecond)
			// 1 pointer can remain on stack or elsewhere
			if fin1 = atomic.LoadUint32(&fin); fin1 >= N-1 {
				continue loop
			}
		}
		t.Fatalf("only %v out of %v resources are finalized on try %v", fin1, N, try)
	}
}

func TestPoolStress(t *testing.T) {
	const P = 10
	N := int(1e6)
	if testing.Short() {
		N /= 100
	}
	var p Pool
	done := make(chan bool)
	for i := 0; i < P; i++ {
		go func() {
			var v any = 0
			for j := 0; j < N; j++ {
				if v == nil {
					v = 0
				}
				p.Put(v)
				v = p.Get()
				if v != nil && v.(int) != 0 {
					t.Errorf("expect 0, got %v", v)
					break
				}
			}
			done <- true
		}()
	}
	for i := 0; i < P; i++ {
		<-done
	}
}

func TestPoolDequeue(t *testing.T) {
	testPoolDequeue(t, NewPoolDequeue(16))
}

func TestPoolChain(t *testing.T) {
	testPoolDequeue(t, NewPoolChain())
}

func testPoolDequeue(t *testing.T, d PoolDequeue) {
	const P = 10
	var N int = 2e6
	if testing.Short() {
		N = 1e3
	}
	have := make([]int32, N)
	var stop int32
	var wg WaitGroup
	record := func(val int) {
		atomic.AddInt32(&have[val], 1)
		if val == N-1 {
			atomic.StoreInt32(&stop, 1)
		}
	}

	// Start P-1 consumers.
	for i := 1; i < P; i++ {
		wg.Add(1)
		go func() {
			fail := 0
			for atomic.LoadInt32(&stop) == 0 {
				val, ok := d.PopTail()
				if ok {
					fail = 0
					record(val.(int))
				} else {
					// Speed up the test by
					// allowing the pusher to run.
					if fail++; fail%100 == 0 {
						runtime.Gosched()
					}
				}
			}
			wg.Done()
		}()
	}

	// Start 1 producer.
	nPopHead := 0
	wg.Add(1)
	go func() {
		for j := 0; j < N; j++ {
			for !d.PushHead(j) {
				// Allow a popper to run.
				runtime.Gosched()
			}
			if j%10 == 0 {
				val, ok := d.PopHead()
				if ok {
					nPopHead++
					record(val.(int))
				}
			}
		}
		wg.Done()
	}()
	wg.Wait()

	// Check results.
	for i, count := range have {
		if count != 1 {
			t.Errorf("expected have[%d] = 1, got %d", i, count)
		}
	}
	// Check that at least some PopHeads succeeded. We skip this
	// check in short mode because it's common enough that the
	// queue will stay nearly empty all the time and a PopTail
	// will happen during the window between every PushHead and
	// PopHead.
	if !testing.Short() && nPopHead == 0 {
		t.Errorf("popHead never succeeded")
	}
}

func BenchmarkPool(b *testing.B) {
	var p Pool
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			p.Put(1)
			p.Get()
		}
	})
}

func BenchmarkPoolOverflow(b *testing.B) {
	var p Pool
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			for b := 0; b < 100; b++ {
				p.Put(1)
			}
			for b := 0; b < 100; b++ {
				p.Get()
			}
		}
	})
}

// Simulate object starvation in order to force Ps to steal objects
// from other Ps.
func BenchmarkPoolStarvation(b *testing.B) {
	var p Pool
	count := 100
	// Reduce the number of objects put into the pool by 33%. This creates
	// object starvation that forces P-local storage to steal objects from
	// other Ps.
	countStarved := count - int(float32(count)*0.33)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			for b := 0; b < countStarved; b++ {
				p.Put(1)
			}
			for b := 0; b < count; b++ {
				p.Get()
			}
		}
	})
}

var globalSink any

func BenchmarkPoolSTW(b *testing.B) {
	// Take control of GC.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))

	var mstats runtime.MemStats
	var pauses []uint64

	var p Pool
	for i := 0; i < b.N; i++ {
		// Put a large number of items into a pool.
		const N = 100000
		var item any = 42
		for i := 0; i < N; i++ {
			p.Put(item)
		}
		// Do a GC.
		runtime.GC()
		// Record pause time.
		runtime.ReadMemStats(&mstats)
		pauses = append(pauses, mstats.PauseNs[(mstats.NumGC+255)%256])
	}

	// Get pause time stats.
	sort.Slice(pauses, func(i, j int) bool { return pauses[i] < pauses[j] })
	var total uint64
	for _, ns := range pauses {
		total += ns
	}
	// ns/op for this benchmark is average STW time.
	b.ReportMetric(float64(total)/float64(b.N), "ns/op")
	b.ReportMetric(float64(pauses[len(pauses)*95/100]), "p95-ns/STW")
	b.ReportMetric(float64(pauses[len(pauses)*50/100]), "p50-ns/STW")
}

func BenchmarkPoolExpensiveNew(b *testing.B) {
	// Populate a pool with items that are expensive to construct
	// to stress pool cleanup and subsequent reconstruction.

	// Create a ballast so the GC has a non-zero heap size and
	// runs at reasonable times.
	globalSink = make([]byte, 8<<20)
	defer func() { globalSink = nil }()

	// Create a pool that's "expensive" to fill.
	var p Pool
	var nNew uint64
	p.New = func() any {
		atomic.AddUint64(&nNew, 1)
		time.Sleep(time.Millisecond)
		return 42
	}
	var mstats1, mstats2 runtime.MemStats
	runtime.ReadMemStats(&mstats1)
	b.RunParallel(func(pb *testing.PB) {
		// Simulate 100X the number of goroutines having items
		// checked out from the Pool simultaneously.
		items := make([]any, 100)
		var sink []byte
		for pb.Next() {
			// Stress the pool.
			for i := range items {
				items[i] = p.Get()
				// Simulate doing some work with this
				// item checked out.
				sink = make([]byte, 32<<10)
			}
			for i, v := range items {
				p.Put(v)
				items[i] = nil
			}
		}
		_ = sink
	})
	runtime.ReadMemStats(&mstats2)

	b.ReportMetric(float64(mstats2.NumGC-mstats1.NumGC)/float64(b.N), "GCs/op")
	b.ReportMetric(float64(nNew)/float64(b.N), "New/op")
}
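
// The sketch below is not part of the upstream test file; it is a minimal
// illustrative example of the ordinary Get/Put pattern that the tests and
// benchmarks above exercise: a Pool with a New constructor handing out
// reusable []byte scratch buffers. The function name and the 1<<10 buffer
// size are arbitrary choices for illustration.
func examplePoolScratchBuffers() {
	bufPool := Pool{
		New: func() any {
			// Called only when Get finds no cached buffer.
			return make([]byte, 1<<10)
		},
	}

	// Get returns a previously Put buffer when one is cached on this P
	// (or stealable from another P), and falls back to New otherwise.
	buf := bufPool.Get().([]byte)

	// ... use buf as scratch space ...
	_ = buf

	// Put makes the buffer available for reuse. The pool may still drop
	// it: as TestPool shows, one GC moves cached items into the victim
	// cache and a second GC frees them.
	bufPool.Put(buf)
}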