github.com/lzhfromustc/gofuzz@v0.0.0-20211116160056-151b3108bbd1/sync/pool_test.go

// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Pool is a no-op under the race detector, so all these tests do not work.
// +build !race

package sync_test

import (
	"runtime"
	"runtime/debug"
	"sort"
	. "sync"
	"sync/atomic"
	"testing"
	"time"
)

func TestPool(t *testing.T) {
	// disable GC so we can control when it happens.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	var p Pool
	if p.Get() != nil {
		t.Fatal("expected empty")
	}

	// Make sure that the goroutine doesn't migrate to another P
	// between Put and Get calls.
	Runtime_procPin()
	p.Put("a")
	p.Put("b")
	if g := p.Get(); g != "a" {
		t.Fatalf("got %#v; want a", g)
	}
	if g := p.Get(); g != "b" {
		t.Fatalf("got %#v; want b", g)
	}
	if g := p.Get(); g != nil {
		t.Fatalf("got %#v; want nil", g)
	}
	Runtime_procUnpin()

	// Put in a large number of objects so they spill into
	// stealable space.
	for i := 0; i < 100; i++ {
		p.Put("c")
	}
	// After one GC, the victim cache should keep them alive.
	runtime.GC()
	if g := p.Get(); g != "c" {
		t.Fatalf("got %#v; want c after GC", g)
	}
	// A second GC should drop the victim cache.
	runtime.GC()
	if g := p.Get(); g != nil {
		t.Fatalf("got %#v; want nil after second GC", g)
	}
}
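
// poolUsageSketch is an illustrative sketch, not part of the original test
// suite, of the plain Pool pattern the tests above exercise: New supplies a
// fresh value on a cache miss, Get/Put recycle values, and nothing retrieved
// from the pool may be assumed to survive a garbage collection. The function
// name and the 1<<10 buffer size are arbitrary choices for the example.
func poolUsageSketch() {
	var scratch Pool
	scratch.New = func() interface{} {
		// Allocate a fresh 1 KiB buffer when the pool has nothing to reuse.
		b := make([]byte, 1<<10)
		return &b
	}
	buf := scratch.Get().(*[]byte) // either a recycled buffer or one from New
	// ... use *buf as temporary working memory ...
	scratch.Put(buf) // return it so a later Get can reuse the allocation
}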

func TestPoolNew(t *testing.T) {
	// disable GC so we can control when it happens.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))

	i := 0
	p := Pool{
		New: func() interface{} {
			i++
			return i
		},
	}
	if v := p.Get(); v != 1 {
		t.Fatalf("got %v; want 1", v)
	}
	if v := p.Get(); v != 2 {
		t.Fatalf("got %v; want 2", v)
	}

	// Make sure that the goroutine doesn't migrate to another P
	// between Put and Get calls.
	Runtime_procPin()
	p.Put(42)
	if v := p.Get(); v != 42 {
		t.Fatalf("got %v; want 42", v)
	}
	Runtime_procUnpin()

	if v := p.Get(); v != 3 {
		t.Fatalf("got %v; want 3", v)
	}
}

// Test that Pool does not hold pointers to previously cached resources.
func TestPoolGC(t *testing.T) {
	testPool(t, true)
}

// Test that Pool releases resources on GC.
func TestPoolRelease(t *testing.T) {
	testPool(t, false)
}

func testPool(t *testing.T, drain bool) {
	var p Pool
	const N = 100
loop:
	for try := 0; try < 3; try++ {
		if try == 1 && testing.Short() {
			break
		}
		var fin, fin1 uint32
		for i := 0; i < N; i++ {
			v := new(string)
			runtime.SetFinalizer(v, func(vv *string) {
				atomic.AddUint32(&fin, 1)
			})
			p.Put(v)
		}
		if drain {
			for i := 0; i < N; i++ {
				p.Get()
			}
		}
		for i := 0; i < 5; i++ {
			runtime.GC()
			time.Sleep(time.Duration(i*100+10) * time.Millisecond)
			// One pointer can remain live on the stack or elsewhere, so accept N-1.
			if fin1 = atomic.LoadUint32(&fin); fin1 >= N-1 {
				continue loop
			}
		}
		t.Fatalf("only %v out of %v resources are finalized on try %v", fin1, N, try)
	}
}
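
// finalizerReleaseSketch is an illustrative sketch, not part of the original
// tests, of the technique testPool relies on to detect that the pool has
// dropped its references: attach a finalizer to a heap object, drop every
// reachable reference, and force GCs until the finalizer has run. The name,
// the retry count, and the sleep interval are arbitrary choices here.
func finalizerReleaseSketch() bool {
	var finalized uint32
	v := new(string)
	runtime.SetFinalizer(v, func(*string) {
		atomic.AddUint32(&finalized, 1)
	})
	v = nil // drop the only reference so the object becomes collectable
	_ = v
	for i := 0; i < 5 && atomic.LoadUint32(&finalized) == 0; i++ {
		runtime.GC()
		// Finalizers run on a separate goroutine, so give it a moment.
		time.Sleep(10 * time.Millisecond)
	}
	return atomic.LoadUint32(&finalized) == 1
}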

func TestPoolStress(t *testing.T) {
	const P = 10
	N := int(1e6)
	if testing.Short() {
		N /= 100
	}
	var p Pool
	done := make(chan bool)
	for i := 0; i < P; i++ {
		go func() {
			var v interface{} = 0
			for j := 0; j < N; j++ {
				if v == nil {
					v = 0
				}
				p.Put(v)
				v = p.Get()
				if v != nil && v.(int) != 0 {
					t.Errorf("expect 0, got %v", v)
					break
				}
			}
			done <- true
		}()
	}
	for i := 0; i < P; i++ {
		<-done
	}
}

func TestPoolDequeue(t *testing.T) {
	testPoolDequeue(t, NewPoolDequeue(16))
}

func TestPoolChain(t *testing.T) {
	testPoolDequeue(t, NewPoolChain())
}

func testPoolDequeue(t *testing.T, d PoolDequeue) {
	const P = 10
	var N int = 2e6
	if testing.Short() {
		N = 1e3
	}
	have := make([]int32, N)
	var stop int32
	var wg WaitGroup
	record := func(val int) {
		atomic.AddInt32(&have[val], 1)
		if val == N-1 {
			atomic.StoreInt32(&stop, 1)
		}
	}

	// Start P-1 consumers.
	for i := 1; i < P; i++ {
		wg.Add(1)
		go func() {
			fail := 0
			for atomic.LoadInt32(&stop) == 0 {
				val, ok := d.PopTail()
				if ok {
					fail = 0
					record(val.(int))
				} else {
					// Speed up the test by
					// allowing the pusher to run.
					if fail++; fail%100 == 0 {
						runtime.Gosched()
					}
				}
			}
			wg.Done()
		}()
	}

	// Start 1 producer.
	nPopHead := 0
	wg.Add(1)
	go func() {
		for j := 0; j < N; j++ {
			for !d.PushHead(j) {
				// Allow a popper to run.
				runtime.Gosched()
			}
			if j%10 == 0 {
				val, ok := d.PopHead()
				if ok {
					nPopHead++
					record(val.(int))
				}
			}
		}
		wg.Done()
	}()
	wg.Wait()

	// Check results.
	for i, count := range have {
		if count != 1 {
			t.Errorf("expected have[%d] = 1, got %d", i, count)
		}
	}
	// Check that at least some PopHeads succeeded. We skip this
	// check in short mode because it's common enough that the
	// queue will stay nearly empty all the time and a PopTail
	// will happen during the window between every PushHead and
	// PopHead.
	if !testing.Short() && nPopHead == 0 {
		t.Errorf("popHead never succeeded")
	}
}
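
// poolDequeueSketch is an illustrative sketch, not part of the original tests,
// of the ownership contract testPoolDequeue exercises: PushHead and PopHead
// may only be called by the single producer that owns the dequeue, while
// PopTail may be called concurrently by any number of consumers. The function
// name is arbitrary; the capacity of 16 matches TestPoolDequeue above.
func poolDequeueSketch() {
	d := NewPoolDequeue(16)

	// Producer/owner side: push new work, optionally take the newest item
	// back LIFO without contending with consumers.
	d.PushHead(1)
	if v, ok := d.PopHead(); ok {
		_ = v // the most recently pushed value
	}

	// Consumer side (safe from other goroutines): steal the oldest item
	// FIFO from the tail.
	if v, ok := d.PopTail(); ok {
		_ = v
	}
}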

func BenchmarkPool(b *testing.B) {
	var p Pool
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			p.Put(1)
			p.Get()
		}
	})
}

func BenchmarkPoolOverflow(b *testing.B) {
	var p Pool
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			for b := 0; b < 100; b++ {
				p.Put(1)
			}
			for b := 0; b < 100; b++ {
				p.Get()
			}
		}
	})
}

var globalSink interface{}

func BenchmarkPoolSTW(b *testing.B) {
	// Take control of GC.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))

	var mstats runtime.MemStats
	var pauses []uint64

	var p Pool
	for i := 0; i < b.N; i++ {
		// Put a large number of items into a pool.
		const N = 100000
		var item interface{} = 42
		for i := 0; i < N; i++ {
			p.Put(item)
		}
		// Do a GC.
		runtime.GC()
		// Record pause time.
		runtime.ReadMemStats(&mstats)
		pauses = append(pauses, mstats.PauseNs[(mstats.NumGC+255)%256])
	}

	// Get pause time stats.
	sort.Slice(pauses, func(i, j int) bool { return pauses[i] < pauses[j] })
	var total uint64
	for _, ns := range pauses {
		total += ns
	}
	// ns/op for this benchmark is average STW time.
	b.ReportMetric(float64(total)/float64(b.N), "ns/op")
	b.ReportMetric(float64(pauses[len(pauses)*95/100]), "p95-ns/STW")
	b.ReportMetric(float64(pauses[len(pauses)*50/100]), "p50-ns/STW")
}
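
// lastGCPauseSketch is an illustrative sketch, not part of the original
// benchmarks, of the bookkeeping BenchmarkPoolSTW relies on:
// runtime.MemStats.PauseNs is a 256-entry circular buffer of recent GC
// stop-the-world pause times, and (NumGC+255)%256 indexes the entry written
// by the most recent GC. The function name is arbitrary.
func lastGCPauseSketch() uint64 {
	var ms runtime.MemStats
	runtime.ReadMemStats(&ms)
	if ms.NumGC == 0 {
		return 0 // no GC has completed yet
	}
	return ms.PauseNs[(ms.NumGC+255)%256]
}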

func BenchmarkPoolExpensiveNew(b *testing.B) {
	// Populate a pool with items that are expensive to construct
	// to stress pool cleanup and subsequent reconstruction.

	// Create a ballast so the GC has a non-zero heap size and
	// runs at reasonable times.
	globalSink = make([]byte, 8<<20)
	defer func() { globalSink = nil }()

	// Create a pool that's "expensive" to fill.
	var p Pool
	var nNew uint64
	p.New = func() interface{} {
		atomic.AddUint64(&nNew, 1)
		time.Sleep(time.Millisecond)
		return 42
	}
	var mstats1, mstats2 runtime.MemStats
	runtime.ReadMemStats(&mstats1)
	b.RunParallel(func(pb *testing.PB) {
		// Simulate 100X the number of goroutines having items
		// checked out from the Pool simultaneously.
		items := make([]interface{}, 100)
		var sink []byte
		for pb.Next() {
			// Stress the pool.
			for i := range items {
				items[i] = p.Get()
				// Simulate doing some work with this
				// item checked out.
				sink = make([]byte, 32<<10)
			}
			for i, v := range items {
				p.Put(v)
				items[i] = nil
			}
		}
		_ = sink
	})
	runtime.ReadMemStats(&mstats2)

	b.ReportMetric(float64(mstats2.NumGC-mstats1.NumGC)/float64(b.N), "GCs/op")
	b.ReportMetric(float64(nNew)/float64(b.N), "New/op")
}