github.com/guyezi/gofrontend@v0.0.0-20200228202240-7a62a49e62c0/libgo/go/sync/pool_test.go

// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Pool is no-op under race detector, so all these tests do not work.
// +build !race

package sync_test

import (
	"runtime"
	"runtime/debug"
	"sort"
	. "sync"
	"sync/atomic"
	"testing"
	"time"
)

func TestPool(t *testing.T) {
	// disable GC so we can control when it happens.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	var p Pool
	if p.Get() != nil {
		t.Fatal("expected empty")
	}

	// Make sure that the goroutine doesn't migrate to another P
	// between Put and Get calls.
	Runtime_procPin()
	p.Put("a")
	p.Put("b")
	if g := p.Get(); g != "a" {
		t.Fatalf("got %#v; want a", g)
	}
	if g := p.Get(); g != "b" {
		t.Fatalf("got %#v; want b", g)
	}
	if g := p.Get(); g != nil {
		t.Fatalf("got %#v; want nil", g)
	}
	Runtime_procUnpin()

	// Put in a large number of objects so they spill into
	// stealable space.
	for i := 0; i < 100; i++ {
		p.Put("c")
	}
	// After one GC, the victim cache should keep them alive.
	runtime.GC()
	if g := p.Get(); g != "c" {
		t.Fatalf("got %#v; want c after GC", g)
	}
	// A second GC should drop the victim cache.
	runtime.GC()
	if g := p.Get(); g != nil {
		t.Fatalf("got %#v; want nil after second GC", g)
	}
}

func TestPoolNew(t *testing.T) {
	// disable GC so we can control when it happens.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))

	i := 0
	p := Pool{
		New: func() interface{} {
			i++
			return i
		},
	}
	if v := p.Get(); v != 1 {
		t.Fatalf("got %v; want 1", v)
	}
	if v := p.Get(); v != 2 {
		t.Fatalf("got %v; want 2", v)
	}

	// Make sure that the goroutine doesn't migrate to another P
	// between Put and Get calls.
	Runtime_procPin()
	p.Put(42)
	if v := p.Get(); v != 42 {
		t.Fatalf("got %v; want 42", v)
	}
	Runtime_procUnpin()

	if v := p.Get(); v != 3 {
		t.Fatalf("got %v; want 3", v)
	}
}

// Test that Pool does not hold pointers to previously cached resources.
func TestPoolGC(t *testing.T) {
	testPool(t, true)
}

// Test that Pool releases resources on GC.
func TestPoolRelease(t *testing.T) {
	testPool(t, false)
}

func testPool(t *testing.T, drain bool) {
	t.Skip("gccgo imprecise GC breaks this test")
	var p Pool
	const N = 100
loop:
	for try := 0; try < 3; try++ {
		if try == 1 && testing.Short() {
			break
		}
		var fin, fin1 uint32
		for i := 0; i < N; i++ {
			v := new(string)
			runtime.SetFinalizer(v, func(vv *string) {
				atomic.AddUint32(&fin, 1)
			})
			p.Put(v)
		}
		if drain {
			for i := 0; i < N; i++ {
				p.Get()
			}
		}
		for i := 0; i < 5; i++ {
			runtime.GC()
			time.Sleep(time.Duration(i*100+10) * time.Millisecond)
			// 1 pointer can remain on stack or elsewhere
			if fin1 = atomic.LoadUint32(&fin); fin1 >= N-1 {
				continue loop
			}
		}
		t.Fatalf("only %v out of %v resources are finalized on try %v", fin1, N, try)
	}
}

func TestPoolStress(t *testing.T) {
	const P = 10
	N := int(1e6)
	if testing.Short() {
		N /= 100
	}
	var p Pool
	done := make(chan bool)
	for i := 0; i < P; i++ {
		go func() {
			var v interface{} = 0
			for j := 0; j < N; j++ {
				if v == nil {
					v = 0
				}
				p.Put(v)
				v = p.Get()
				if v != nil && v.(int) != 0 {
					t.Errorf("expect 0, got %v", v)
					break
				}
			}
			done <- true
		}()
	}
	for i := 0; i < P; i++ {
		<-done
	}
}

func TestPoolDequeue(t *testing.T) {
	testPoolDequeue(t, NewPoolDequeue(16))
}

func TestPoolChain(t *testing.T) {
	testPoolDequeue(t, NewPoolChain())
}

func testPoolDequeue(t *testing.T, d PoolDequeue) {
	const P = 10
	var N int = 2e6
	if testing.Short() {
		N = 1e3
	}
	have := make([]int32, N)
	var stop int32
	var wg WaitGroup
	record := func(val int) {
		atomic.AddInt32(&have[val], 1)
		if val == N-1 {
			atomic.StoreInt32(&stop, 1)
		}
	}

	// Start P-1 consumers.
	for i := 1; i < P; i++ {
		wg.Add(1)
		go func() {
			fail := 0
			for atomic.LoadInt32(&stop) == 0 {
				val, ok := d.PopTail()
				if ok {
					fail = 0
					record(val.(int))
				} else {
					// Speed up the test by
					// allowing the pusher to run.
					if fail++; fail%100 == 0 {
						runtime.Gosched()
					}
				}
			}
			wg.Done()
		}()
	}

	// Start 1 producer.
	nPopHead := 0
	wg.Add(1)
	go func() {
		for j := 0; j < N; j++ {
			for !d.PushHead(j) {
				// Allow a popper to run.
				runtime.Gosched()
			}
			if j%10 == 0 {
				val, ok := d.PopHead()
				if ok {
					nPopHead++
					record(val.(int))
				}
			}
		}
		wg.Done()
	}()
	wg.Wait()

	// Check results.
	for i, count := range have {
		if count != 1 {
			t.Errorf("expected have[%d] = 1, got %d", i, count)
		}
	}
	// Check that at least some PopHeads succeeded. We skip this
	// check in short mode because it's common enough that the
	// queue will stay nearly empty all the time and a PopTail
	// will happen during the window between every PushHead and
	// PopHead.
	if !testing.Short() && nPopHead == 0 {
		t.Errorf("popHead never succeeded")
	}
}

func BenchmarkPool(b *testing.B) {
	var p Pool
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			p.Put(1)
			p.Get()
		}
	})
}

func BenchmarkPoolOverflow(b *testing.B) {
	var p Pool
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			for b := 0; b < 100; b++ {
				p.Put(1)
			}
			for b := 0; b < 100; b++ {
				p.Get()
			}
		}
	})
}

var globalSink interface{}

func BenchmarkPoolSTW(b *testing.B) {
	// Take control of GC.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))

	var mstats runtime.MemStats
	var pauses []uint64

	var p Pool
	for i := 0; i < b.N; i++ {
		// Put a large number of items into a pool.
		const N = 100000
		var item interface{} = 42
		for i := 0; i < N; i++ {
			p.Put(item)
		}
		// Do a GC.
		runtime.GC()
		// Record pause time.
		runtime.ReadMemStats(&mstats)
		pauses = append(pauses, mstats.PauseNs[(mstats.NumGC+255)%256])
	}

	// Get pause time stats.
	sort.Slice(pauses, func(i, j int) bool { return pauses[i] < pauses[j] })
	var total uint64
	for _, ns := range pauses {
		total += ns
	}
	// ns/op for this benchmark is average STW time.
	b.ReportMetric(float64(total)/float64(b.N), "ns/op")
	b.ReportMetric(float64(pauses[len(pauses)*95/100]), "p95-ns/STW")
	b.ReportMetric(float64(pauses[len(pauses)*50/100]), "p50-ns/STW")
}

func BenchmarkPoolExpensiveNew(b *testing.B) {
	// Populate a pool with items that are expensive to construct
	// to stress pool cleanup and subsequent reconstruction.

	// Create a ballast so the GC has a non-zero heap size and
	// runs at reasonable times.
	globalSink = make([]byte, 8<<20)
	defer func() { globalSink = nil }()

	// Create a pool that's "expensive" to fill.
	var p Pool
	var nNew uint64
	p.New = func() interface{} {
		atomic.AddUint64(&nNew, 1)
		time.Sleep(time.Millisecond)
		return 42
	}
	var mstats1, mstats2 runtime.MemStats
	runtime.ReadMemStats(&mstats1)
	b.RunParallel(func(pb *testing.PB) {
		// Simulate 100X the number of goroutines having items
		// checked out from the Pool simultaneously.
		items := make([]interface{}, 100)
		var sink []byte
		for pb.Next() {
			// Stress the pool.
			for i := range items {
				items[i] = p.Get()
				// Simulate doing some work with this
				// item checked out.
				sink = make([]byte, 32<<10)
			}
			for i, v := range items {
				p.Put(v)
				items[i] = nil
			}
		}
		_ = sink
	})
	runtime.ReadMemStats(&mstats2)

	b.ReportMetric(float64(mstats2.NumGC-mstats1.NumGC)/float64(b.N), "GCs/op")
	b.ReportMetric(float64(nNew)/float64(b.N), "New/op")
}