github.com/searKing/golang/go@v1.2.74/exp/sync/fixedpool_test.go

// Copyright 2022 The searKing Author. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package sync_test

import (
	"fmt"
	"path"
	"path/filepath"
	"runtime"
	"runtime/debug"
	"sort"
	"strconv"
	"sync/atomic"
	"testing"
	"time"

	sync_ "github.com/searKing/golang/go/exp/sync"
	runtime_ "github.com/searKing/golang/go/runtime"
)

func caller() string {
	function, file, line := runtime_.GetCallerFuncFileLine(2)
	return fmt.Sprintf("%s() %s:%d", path.Base(function), filepath.Base(file), line)
}

// testFixedPoolLenAndCap asserts the pool's Len and Cap; a negative want
// value skips that check.
func testFixedPoolLenAndCap[E any](t *testing.T, p *sync_.FixedPool[E], l, c int) {
	gotLen := p.Len()
	gotCap := p.Cap()
	if (gotLen != l && l >= 0) || (gotCap != c && c >= 0) {
		t.Fatalf("%s, got %d|%d; want %d|%d", caller(), gotLen, gotCap, l, c)
	}
}

func TestNewCachedPool(t *testing.T) {
	// Disable GC so we can control when it happens.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	var p = sync_.NewCachedPool[string](nil)
	if p.TryGet() != nil {
		t.Fatal("expected empty")
	}
	p.Emplace("a")
	testFixedPoolLenAndCap(t, p, 1, 1)
	p.Emplace("b")
	testFixedPoolLenAndCap(t, p, 2, 2)
	if g := p.Get(); g.Value != "a" {
		t.Fatalf("got %#v; want a", g)
	}
	testFixedPoolLenAndCap(t, p, 1, 2)
	if g := p.Get(); g.Value != "b" {
		t.Fatalf("got %#v; want b", g)
	}
	testFixedPoolLenAndCap(t, p, 0, 2)
	if g := p.TryGet(); g != nil {
		t.Fatalf("got %#v; want nil", g)
	}
	testFixedPoolLenAndCap(t, p, 0, 2)

	// Put in a large number of items, so they spill into
	// stealable space.
	n := 100
	for i := 0; i < n; i++ {
		p.Emplace("c")
		testFixedPoolLenAndCap(t, p, i+1, i+1+2)
	}
	testFixedPoolLenAndCap(t, p, 100, 102)
	for i := 0; i < n; i++ {
		if g := p.Get(); g.Value != "c" {
			t.Fatalf("got %#v; want c", g)
		}
	}
	testFixedPoolLenAndCap(t, p, 0, 102)
	if g := p.TryGet(); g != nil {
		t.Fatalf("got %#v; want nil", g)
	}
	testFixedPoolLenAndCap(t, p, 0, 102)
}

func TestNewTempPool(t *testing.T) {
	// Disable GC so we can control when it happens.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	var p = sync_.NewTempPool[string](nil)
	if p.TryGet() != nil {
		t.Fatal("expected empty")
	}
	testFixedPoolLenAndCap(t, p, 0, 0)
	p.Emplace("a")
	testFixedPoolLenAndCap(t, p, 1, 1)
	p.Emplace("b")
	testFixedPoolLenAndCap(t, p, 2, 2)
	if g := p.Get(); g.Value != "a" {
		t.Fatalf("got %#v; want a", g)
	}
	testFixedPoolLenAndCap(t, p, 1, 2)
	if g := p.Get(); g.Value != "b" {
		t.Fatalf("got %#v; want b", g)
	}
	testFixedPoolLenAndCap(t, p, 0, 2)

	// Put in a large number of items, so they spill into
	// stealable space.
	for i := 0; i < 100; i++ {
		p.Emplace("c")
		testFixedPoolLenAndCap(t, p, i+1, i+1+2)
	}
	testFixedPoolLenAndCap(t, p, 100, 102)
	// After one GC, the victim cache should keep the items alive; the two
	// items already taken by Get and no longer referenced are dropped.
	runtime.GC()
	testFixedPoolLenAndCap(t, p, 100, 100)
	if g := p.Get(); g.Value != "c" {
		t.Fatalf("got %#v; want c after GC", g)
	}
	testFixedPoolLenAndCap(t, p, 99, 100)
	// A second GC should drop the victim cache.
	runtime.GC()
	testFixedPoolLenAndCap(t, p, 0, 0)
	if g := p.TryGet(); g != nil {
		t.Fatalf("got %#v; want nil after second GC", g)
	}
	testFixedPoolLenAndCap(t, p, 0, 0)
}
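// Editorial sketch, not part of the original suite: the minimal Emplace/Get
// round trip that TestNewCachedPool above exercises. Only API already used
// in this file is assumed (NewCachedPool, Emplace, Get, TryGet), and the
// expected output follows exactly the behavior that test asserts.
func ExampleNewCachedPool() {
	p := sync_.NewCachedPool[string](nil)
	p.Emplace("hello")
	if e := p.Get(); e != nil {
		fmt.Println(e.Value) // the element wraps the emplaced value
	}
	if p.TryGet() == nil {
		fmt.Println("drained") // TryGet never blocks; it reports nil when empty
	}
	// Output:
	// hello
	// drained
}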
func TestFixedPoolNilNew(t *testing.T) {
	// Disable GC so we can control when it happens.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	const LEN = 10
	const CAP = 20
	var p = (&sync_.FixedPool[string]{
		New:             nil,
		MinResidentSize: 0,
		MaxResidentSize: LEN,
		MaxCapacity:     CAP,
	}).Init()

	testFixedPoolLenAndCap(t, p, 0, 0)

	if p.TryGet() != nil {
		t.Fatal("expected empty")
	}
	p.Emplace("a")
	testFixedPoolLenAndCap(t, p, 1, 1)
	p.Emplace("b")
	testFixedPoolLenAndCap(t, p, 2, 2)
	if g := p.Get(); g.Value != "a" {
		t.Fatalf("got %#v; want a", g)
	}
	testFixedPoolLenAndCap(t, p, 1, 2)
	if g := p.Get(); g.Value != "b" {
		t.Fatalf("got %#v; want b", g)
	}
	testFixedPoolLenAndCap(t, p, 0, 2)
	if g := p.TryGet(); g != nil {
		t.Fatalf("got %#v; want nil", g)
	}
	testFixedPoolLenAndCap(t, p, 0, 2)

	// Put in a large number of items, so they spill into
	// stealable space.
	for i := 0; i < 100; i++ {
		p.Emplace("c")
		testFixedPoolLenAndCap(t, p, i+1, i+1+2)
	}
	testFixedPoolLenAndCap(t, p, 100, 102)
	// After one GC, the victim cache should keep the items alive.
	runtime.GC()
	testFixedPoolLenAndCap(t, p, 100, 100)
	if g := p.Get(); g.Value != "c" {
		t.Fatalf("got %#v; want c after GC", g)
	}
	testFixedPoolLenAndCap(t, p, 99, 100)
	// A second GC should drop the victim cache and re-put items into the
	// local pool first, up to MaxResidentSize.
	runtime.GC()
	testFixedPoolLenAndCap(t, p, LEN, LEN)

	// Drain the keep-alive cache.
	for i := 0; i < LEN; i++ {
		if g := p.Get(); g == nil || g.Value != "c" {
			t.Fatalf("#%d: got %#v; want c after GC", i, g)
		}
	}
	if g := p.TryGet(); g != nil {
		t.Fatalf("got %#v; want nil after second GC", g)
	}
	testFixedPoolLenAndCap(t, p, 0, LEN)
	// After one GC, the victim cache should keep the resident items alive;
	// the items taken by Get are no longer referenced and can be collected.
	runtime.GC()
	testFixedPoolLenAndCap(t, p, LEN, LEN)
	// A second GC should drop the victim cache and refill the local pool first.
	runtime.GC()
	testFixedPoolLenAndCap(t, p, LEN, LEN)
}

func TestFixedPoolNew(t *testing.T) {
	// Disable GC so we can control when it happens.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))

	const MinLen = 2
	const LEN = 10
	const CAP = 20
	i := 0
	var p = (&sync_.FixedPool[int]{
		New: func() int {
			i++
			return i
		},
		MinResidentSize: MinLen,
		MaxResidentSize: LEN,
		MaxCapacity:     CAP,
	}).Init()
	testFixedPoolLenAndCap(t, p, MinLen, MinLen)

	if v := p.Get(); v.Value != 1 {
		t.Fatalf("got %v; want 1", v.Value)
	}
	if v := p.Get(); v.Value != 2 {
		t.Fatalf("got %v; want 2", v.Value)
	}

	p.Emplace(42)
	if v := p.Get(); v.Value != 42 {
		t.Fatalf("got %v; want 42", v.Value)
	}

	if v := p.Get(); v.Value != 3 {
		t.Fatalf("got %v; want 3", v.Value)
	}
}
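// Editorial sketch, not from the original file: TestFixedPoolNew above shows
// that Init eagerly constructs MinResidentSize elements via New (the first
// two Gets observe 1 and 2 without any Emplace). The example below restates
// that pattern with a single resident element; it is compiled but carries no
// Output comment on purpose, since it is a sketch rather than a golden test.
func ExampleFixedPool_Init() {
	var calls int32
	p := (&sync_.FixedPool[int]{
		New: func() int {
			// New is expected to run once per eagerly constructed resident.
			return int(atomic.AddInt32(&calls, 1))
		},
		MinResidentSize: 1,
		MaxResidentSize: 4,
		MaxCapacity:     8,
	}).Init()
	// Under the behavior asserted in TestFixedPoolNew, Len and Cap now both
	// report the resident minimum.
	fmt.Println(p.Len(), p.Cap())
}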
func TestFixedPoolGCRetryPut(t *testing.T) {
	// Disable GC so we can control when it happens.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	const LEN = 1
	const CAP = 2
	var p = (&sync_.FixedPool[string]{
		New:             nil,
		MinResidentSize: 0,
		MaxResidentSize: LEN,
		MaxCapacity:     CAP,
	}).Init()

	testFixedPoolLenAndCap(t, p, 0, 0)

	if p.TryGet() != nil {
		t.Fatal("expected empty")
	}

	// Put in more items than MaxCapacity, so they spill into
	// stealable space.
	var N = 4
	for i := 0; i < N; i++ {
		p.Emplace(strconv.Itoa(i))
		testFixedPoolLenAndCap(t, p, i+1, i+1)
	}
	testFixedPoolLenAndCap(t, p, N, N)
	// After one GC, the victim cache should keep the items alive.
	runtime.GC()
	testFixedPoolLenAndCap(t, p, N, N)
	if g := p.Get(); g.Value != "0" {
		t.Fatalf("got %#v; want 0 after GC", g)
	}
	testFixedPoolLenAndCap(t, p, N-1, N)
	// A second GC should drop the victim cache and re-put items into the
	// local pool first, up to MaxResidentSize.
	runtime.GC()
	testFixedPoolLenAndCap(t, p, LEN, LEN)

	// Drain the keep-alive cache.
	for i := 1; i < LEN+1; i++ {
		if g := p.Get(); g == nil {
			t.Fatalf("#%d: got nil; want %q after GC", i, strconv.Itoa(i))
		}
	}
	testFixedPoolLenAndCap(t, p, 0, LEN)
	if g := p.TryGet(); g != nil {
		t.Fatalf("got %#v; want nil after second GC", g.Value)
	}
	testFixedPoolLenAndCap(t, p, 0, LEN)
	// After one GC, the victim cache should keep the resident items alive.
	runtime.GC()
	testFixedPoolLenAndCap(t, p, LEN, LEN)
	// A second GC should drop the victim cache and refill the local pool first.
	runtime.GC()
	testFixedPoolLenAndCap(t, p, LEN, LEN)
	if g := p.TryGet(); g == nil {
		t.Fatal("got nil; want not nil after GC")
	}
	testFixedPoolLenAndCap(t, p, 0, LEN)
}

func TestFixedPoolGCReFillLocal(t *testing.T) {
	// Disable GC so we can control when it happens.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	const LEN = 1
	const CAP = 2
	var p = (&sync_.FixedPool[string]{
		New:             nil,
		MinResidentSize: 0,
		MaxResidentSize: LEN,
		MaxCapacity:     CAP,
	}).Init()
	// Put in more items than MaxCapacity, so they spill into
	// stealable space.
	for i := 0; i < CAP*2; i++ {
		p.Emplace(strconv.Itoa(i))
	}
	testFixedPoolLenAndCap(t, p, CAP*2, CAP*2)

	// Drain the whole cache.
	for i := 0; i < CAP*2; i++ {
		g := p.Get()
		if i < LEN {
			if g == nil || g.Value != strconv.Itoa(i) {
				t.Fatalf("#%d: got %#v; want %q", i, g, strconv.Itoa(i))
			}
		} else if g == nil {
			t.Fatalf("#%d: got nil; want %q", i, strconv.Itoa(i))
		}
		testFixedPoolLenAndCap(t, p, CAP*2-i-1, CAP*2)
	}
	testFixedPoolLenAndCap(t, p, 0, CAP*2)
	if g := p.TryGet(); g != nil {
		t.Fatalf("got %#v; want nil", g)
	}
	testFixedPoolLenAndCap(t, p, 0, CAP*2)

	// After one GC, the victim cache should keep the resident items alive.
	runtime.GC()
	testFixedPoolLenAndCap(t, p, LEN, LEN)
	// A second GC should drop the victim cache and refill the local pool first.
	runtime.GC()
	testFixedPoolLenAndCap(t, p, LEN, LEN)

	// Drain the whole cache again.
	for i := 0; i < LEN; i++ {
		if g := p.Get(); g == nil {
			t.Fatalf("#%d: got nil; want not nil after GC", i)
		}
	}
	testFixedPoolLenAndCap(t, p, 0, LEN)
	if g := p.TryGet(); g != nil {
		t.Fatalf("got %#v; want nil after second GC", g)
	}
	testFixedPoolLenAndCap(t, p, 0, LEN)
}
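// Editorial helper capturing the GC choreography the tests above repeat,
// under the reading their assertions support: the first GC parks live items
// in a victim cache, and the second drops the victims while re-putting up to
// MaxResidentSize items into the local pool, so a pool with a non-zero
// resident size never shrinks to empty across GCs.
func gcTwice() {
	runtime.GC() // live items move to the victim cache
	runtime.GC() // victim cache dropped; resident items are refilled locally
}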
// Test that the pool does not hold pointers to previously cached resources.
func TestFixedPoolGC(t *testing.T) {
	testFixedPool(t, true)
}

// Test that the pool releases resources on GC.
func TestFixedPoolRelease(t *testing.T) {
	testFixedPool(t, false)
}

func testFixedPool(t *testing.T, drain bool) {
	var p sync_.FixedPool[*string]
	const N = 100
loop:
	for try := 0; try < 3; try++ {
		if try == 1 && testing.Short() {
			testFixedPoolLenAndCap(t, &p, 0, 0)
			break
		}
		var fin, fin1 uint32
		for i := 0; i < N; i++ {
			v := new(string)
			runtime.SetFinalizer(v, func(vv *string) {
				atomic.AddUint32(&fin, 1)
			})
			p.Emplace(v)
		}
		if drain {
			for i := 0; i < N; i++ {
				p.Get()
			}
		}
		for i := 0; i < 5; i++ {
			runtime.GC()
			time.Sleep(time.Duration(i*100+10) * time.Millisecond)
			// One pointer can remain on the stack or elsewhere.
			if fin1 = atomic.LoadUint32(&fin); fin1 >= N-1 {
				continue loop
			}
		}
		t.Fatalf("only %v out of %v resources are finalized on try %v", fin1, N, try)
	}
}

func TestFixedPoolStress(t *testing.T) {
	const P = 10
	N := int(1e6)
	if testing.Short() {
		N /= 100
	}
	var p sync_.FixedPool[any]
	done := make(chan bool)
	for i := 0; i < P; i++ {
		go func() {
			var v any = 0
			for j := 0; j < N; j++ {
				if v == nil {
					v = 0
				}
				p.Emplace(v)
				e := p.Get()
				if e != nil && e.Value != 0 {
					t.Errorf("expect 0, got %v", e.Value)
					break
				}
			}
			done <- true
		}()
	}
	for i := 0; i < P; i++ {
		<-done
	}
}

func BenchmarkFixedPool(b *testing.B) {
	var p sync_.FixedPool[int]
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			p.Emplace(1)
			p.Get()
		}
	})
}

func BenchmarkPoolOverflow(b *testing.B) {
	var p sync_.FixedPool[int]
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			for i := 0; i < 100; i++ {
				p.Emplace(1)
			}
			for i := 0; i < 100; i++ {
				p.Get()
			}
		}
	})
}

// Simulate object starvation in order to force Ps to steal items
// from other Ps.
func BenchmarkPoolStarvation(b *testing.B) {
	var p sync_.FixedPool[int]
	count := 100
	// Reduce the number of items put into the pool by 33%. This creates
	// item starvation that forces P-local storage to steal items from
	// other Ps.
	countStarved := count - int(float32(count)*0.33)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			for i := 0; i < countStarved; i++ {
				p.Emplace(1)
			}
			for i := 0; i < count; i++ {
				p.Get()
			}
		}
	})
}

var globalSink any

func BenchmarkPoolSTW(b *testing.B) {
	// Take control of GC.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))

	var mstats runtime.MemStats
	var pauses []uint64

	var p sync_.FixedPool[any]
	for i := 0; i < b.N; i++ {
		// Put a large number of items into the pool.
		const N = 100000
		var item any = 42
		for i := 0; i < N; i++ {
			p.Emplace(item)
		}
		// Do a GC.
		runtime.GC()
		// Record the pause time.
		runtime.ReadMemStats(&mstats)
		pauses = append(pauses, mstats.PauseNs[(mstats.NumGC+255)%256])
	}

	// Get pause time stats.
	sort.Slice(pauses, func(i, j int) bool { return pauses[i] < pauses[j] })
	var total uint64
	for _, ns := range pauses {
		total += ns
	}
	// ns/op for this benchmark is the average STW time.
	b.ReportMetric(float64(total)/float64(b.N), "ns/op")
	b.ReportMetric(float64(pauses[len(pauses)*95/100]), "p95-ns/STW")
	b.ReportMetric(float64(pauses[len(pauses)*50/100]), "p50-ns/STW")
}
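// Editorial helper, assuming only the documented runtime.MemStats contract:
// PauseNs is a circular buffer of recent GC stop-the-world pause times, and
// the most recent pause sits at index (NumGC+255)%256, which is the
// expression BenchmarkPoolSTW reads above.
func lastGCPause(m *runtime.MemStats) uint64 {
	return m.PauseNs[(m.NumGC+255)%256]
}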
func BenchmarkPoolExpensiveNew(b *testing.B) {
	// Populate a pool with items that are expensive to construct
	// to stress pool cleanup and subsequent reconstruction.

	// Create a ballast so the GC has a non-zero heap size and
	// runs at reasonable times.
	globalSink = make([]byte, 8<<20)
	defer func() { globalSink = nil }()

	// Create a pool that's "expensive" to fill.
	var p sync_.FixedPool[any]
	var nNew uint64
	p.New = func() any {
		atomic.AddUint64(&nNew, 1)
		time.Sleep(time.Millisecond)
		return 42
	}
	var mstats1, mstats2 runtime.MemStats
	runtime.ReadMemStats(&mstats1)
	b.RunParallel(func(pb *testing.PB) {
		// Simulate 100X the number of goroutines having items
		// checked out from the pool simultaneously.
		items := make([]*sync_.FixedPoolElement[any], 100)
		var sink []byte
		for pb.Next() {
			// Stress the pool.
			for i := range items {
				items[i] = p.Get()
				// Simulate doing some work with this
				// item checked out.
				sink = make([]byte, 32<<10)
			}
			for i, v := range items {
				p.Put(v)
				items[i] = nil
			}
		}
		_ = sink
	})
	runtime.ReadMemStats(&mstats2)

	b.ReportMetric(float64(mstats2.NumGC-mstats1.NumGC)/float64(b.N), "GCs/op")
	b.ReportMetric(float64(nNew)/float64(b.N), "New/op")
}
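// Editorial sketch, not part of the original suite: the file uses two
// insertion paths, Emplace, which wraps a bare value, and Put, which gives
// back a previously obtained *FixedPoolElement (see BenchmarkPoolExpensiveNew
// above). A checkout/checkin round trip under that reading looks like this;
// only API already used in this file is assumed.
func checkoutCheckin(p *sync_.FixedPool[any]) {
	e := p.Get() // check an element out of the pool
	if e == nil {
		return
	}
	_ = e.Value // do some work with the checked-out value
	p.Put(e)    // return the same element, rather than re-wrapping the value
}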