github.com/luckypickle/go-ethereum-vet@v1.14.2/metrics/sample_test.go

package metrics

import (
	"math"
	"math/rand"
	"runtime"
	"testing"
	"time"
)

// Benchmark{Compute,Copy}{1000,1000000} demonstrate that, even for relatively
// expensive computations like Variance, the cost of copying the Sample, as
// approximated by a make and copy, is much greater than the cost of the
// computation for small samples and only slightly less for large samples.
func BenchmarkCompute1000(b *testing.B) {
	s := make([]int64, 1000)
	for i := 0; i < len(s); i++ {
		s[i] = int64(i)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		SampleVariance(s)
	}
}
func BenchmarkCompute1000000(b *testing.B) {
	s := make([]int64, 1000000)
	for i := 0; i < len(s); i++ {
		s[i] = int64(i)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		SampleVariance(s)
	}
}
func BenchmarkCopy1000(b *testing.B) {
	s := make([]int64, 1000)
	for i := 0; i < len(s); i++ {
		s[i] = int64(i)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		sCopy := make([]int64, len(s))
		copy(sCopy, s)
	}
}
func BenchmarkCopy1000000(b *testing.B) {
	s := make([]int64, 1000000)
	for i := 0; i < len(s); i++ {
		s[i] = int64(i)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		sCopy := make([]int64, len(s))
		copy(sCopy, s)
	}
}
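
// BenchmarkCopyAndCompute1000 is an illustrative sketch rather than part of
// the canonical suite: it times the make-and-copy together with the Variance
// computation in one loop body, approximating what a snapshot-then-compute
// pattern costs for a small sample. It uses only SampleVariance, which is
// already exercised above.
func BenchmarkCopyAndCompute1000(b *testing.B) {
	s := make([]int64, 1000)
	for i := 0; i < len(s); i++ {
		s[i] = int64(i)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		// Copy first, then compute on the copy, mirroring a caller that takes
		// a stable snapshot before running a statistic over it.
		sCopy := make([]int64, len(s))
		copy(sCopy, s)
		SampleVariance(sCopy)
	}
}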

func BenchmarkExpDecaySample257(b *testing.B) {
	benchmarkSample(b, NewExpDecaySample(257, 0.015))
}

func BenchmarkExpDecaySample514(b *testing.B) {
	benchmarkSample(b, NewExpDecaySample(514, 0.015))
}

func BenchmarkExpDecaySample1028(b *testing.B) {
	benchmarkSample(b, NewExpDecaySample(1028, 0.015))
}

func BenchmarkUniformSample257(b *testing.B) {
	benchmarkSample(b, NewUniformSample(257))
}

func BenchmarkUniformSample514(b *testing.B) {
	benchmarkSample(b, NewUniformSample(514))
}

func BenchmarkUniformSample1028(b *testing.B) {
	benchmarkSample(b, NewUniformSample(1028))
}

func TestExpDecaySample10(t *testing.T) {
	rand.Seed(1)
	s := NewExpDecaySample(100, 0.99)
	for i := 0; i < 10; i++ {
		s.Update(int64(i))
	}
	if size := s.Count(); 10 != size {
		t.Errorf("s.Count(): 10 != %v\n", size)
	}
	if size := s.Size(); 10 != size {
		t.Errorf("s.Size(): 10 != %v\n", size)
	}
	if l := len(s.Values()); 10 != l {
		t.Errorf("len(s.Values()): 10 != %v\n", l)
	}
	for _, v := range s.Values() {
		if v > 10 || v < 0 {
			t.Errorf("out of range [0, 10): %v\n", v)
		}
	}
}

func TestExpDecaySample100(t *testing.T) {
	rand.Seed(1)
	s := NewExpDecaySample(1000, 0.01)
	for i := 0; i < 100; i++ {
		s.Update(int64(i))
	}
	if size := s.Count(); 100 != size {
		t.Errorf("s.Count(): 100 != %v\n", size)
	}
	if size := s.Size(); 100 != size {
		t.Errorf("s.Size(): 100 != %v\n", size)
	}
	if l := len(s.Values()); 100 != l {
		t.Errorf("len(s.Values()): 100 != %v\n", l)
	}
	for _, v := range s.Values() {
		if v > 100 || v < 0 {
			t.Errorf("out of range [0, 100): %v\n", v)
		}
	}
}

func TestExpDecaySample1000(t *testing.T) {
	rand.Seed(1)
	s := NewExpDecaySample(100, 0.99)
	for i := 0; i < 1000; i++ {
		s.Update(int64(i))
	}
	if size := s.Count(); 1000 != size {
		t.Errorf("s.Count(): 1000 != %v\n", size)
	}
	if size := s.Size(); 100 != size {
		t.Errorf("s.Size(): 100 != %v\n", size)
	}
	if l := len(s.Values()); 100 != l {
		t.Errorf("len(s.Values()): 100 != %v\n", l)
	}
	for _, v := range s.Values() {
		if v > 1000 || v < 0 {
			t.Errorf("out of range [0, 1000): %v\n", v)
		}
	}
}

// This test makes sure that the sample's priority is not amplified by using
// nanosecond duration since start rather than second duration since start.
// The priority becomes +Inf quickly after starting if this is done,
// effectively freezing the set of samples until a rescale step happens.
func TestExpDecaySampleNanosecondRegression(t *testing.T) {
	rand.Seed(1)
	s := NewExpDecaySample(100, 0.99)
	for i := 0; i < 100; i++ {
		s.Update(10)
	}
	time.Sleep(1 * time.Millisecond)
	for i := 0; i < 100; i++ {
		s.Update(20)
	}
	v := s.Values()
	avg := float64(0)
	for i := 0; i < len(v); i++ {
		avg += float64(v[i])
	}
	avg /= float64(len(v))
	if avg > 16 || avg < 14 {
		t.Errorf("out of range [14, 16]: %v\n", avg)
	}
}
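
// TestExpDecayPriorityOverflowSketch is an illustrative sketch of the failure
// mode described above, assuming the sample priority has the form
// exp(alpha*elapsed)/rand with elapsed measured since the landmark time: with
// elapsed in seconds the exponent stays finite over the rescale window, but
// with elapsed in nanoseconds math.Exp overflows float64 to +Inf almost
// immediately, freezing the reservoir.
func TestExpDecayPriorityOverflowSketch(t *testing.T) {
	const alpha = 0.99
	elapsedSeconds := 60.0                                // one minute since the landmark
	elapsedNanos := elapsedSeconds * float64(time.Second) // the same minute, in nanoseconds
	if p := math.Exp(alpha * elapsedSeconds); math.IsInf(p, 1) {
		t.Errorf("seconds-based exponent unexpectedly overflowed: %v", p)
	}
	if p := math.Exp(alpha * elapsedNanos); !math.IsInf(p, 1) {
		t.Errorf("nanosecond-based exponent should overflow to +Inf, got %v", p)
	}
}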

func TestExpDecaySampleRescale(t *testing.T) {
	s := NewExpDecaySample(2, 0.001).(*ExpDecaySample)
	s.update(time.Now(), 1)
	s.update(time.Now().Add(time.Hour+time.Microsecond), 1)
	for _, v := range s.values.Values() {
		if v.k == 0.0 {
			t.Fatal("v.k == 0.0")
		}
	}
}

func TestExpDecaySampleSnapshot(t *testing.T) {
	now := time.Now()
	rand.Seed(1)
	s := NewExpDecaySample(100, 0.99)
	for i := 1; i <= 10000; i++ {
		s.(*ExpDecaySample).update(now.Add(time.Duration(i)), int64(i))
	}
	snapshot := s.Snapshot()
	s.Update(1)
	testExpDecaySampleStatistics(t, snapshot)
}

func TestExpDecaySampleStatistics(t *testing.T) {
	now := time.Now()
	rand.Seed(1)
	s := NewExpDecaySample(100, 0.99)
	for i := 1; i <= 10000; i++ {
		s.(*ExpDecaySample).update(now.Add(time.Duration(i)), int64(i))
	}
	testExpDecaySampleStatistics(t, s)
}

func TestUniformSample(t *testing.T) {
	rand.Seed(1)
	s := NewUniformSample(100)
	for i := 0; i < 1000; i++ {
		s.Update(int64(i))
	}
	if size := s.Count(); 1000 != size {
		t.Errorf("s.Count(): 1000 != %v\n", size)
	}
	if size := s.Size(); 100 != size {
		t.Errorf("s.Size(): 100 != %v\n", size)
	}
	if l := len(s.Values()); 100 != l {
		t.Errorf("len(s.Values()): 100 != %v\n", l)
	}
	for _, v := range s.Values() {
		if v > 1000 || v < 0 {
			t.Errorf("out of range [0, 1000): %v\n", v)
		}
	}
}

func TestUniformSampleIncludesTail(t *testing.T) {
	rand.Seed(1)
	s := NewUniformSample(100)
	max := 100
	for i := 0; i < max; i++ {
		s.Update(int64(i))
	}
	v := s.Values()
	sum := 0
	exp := (max - 1) * max / 2
	for i := 0; i < len(v); i++ {
		sum += int(v[i])
	}
	if exp != sum {
		t.Errorf("sum: %v != %v\n", exp, sum)
	}
}

func TestUniformSampleSnapshot(t *testing.T) {
	rand.Seed(1)
	s := NewUniformSample(100)
	for i := 1; i <= 10000; i++ {
		s.Update(int64(i))
	}
	snapshot := s.Snapshot()
	s.Update(1)
	testUniformSampleStatistics(t, snapshot)
}

func TestUniformSampleStatistics(t *testing.T) {
	rand.Seed(1)
	s := NewUniformSample(100)
	for i := 1; i <= 10000; i++ {
		s.Update(int64(i))
	}
	testUniformSampleStatistics(t, s)
}

func benchmarkSample(b *testing.B, s Sample) {
	var memStats runtime.MemStats
	runtime.ReadMemStats(&memStats)
	pauseTotalNs := memStats.PauseTotalNs
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		s.Update(1)
	}
	b.StopTimer()
	runtime.GC()
	runtime.ReadMemStats(&memStats)
	b.Logf("GC cost: %d ns/op", int(memStats.PauseTotalNs-pauseTotalNs)/b.N)
}

func testExpDecaySampleStatistics(t *testing.T, s Sample) {
	if count := s.Count(); 10000 != count {
		t.Errorf("s.Count(): 10000 != %v\n", count)
	}
	if min := s.Min(); 107 != min {
		t.Errorf("s.Min(): 107 != %v\n", min)
	}
	if max := s.Max(); 10000 != max {
		t.Errorf("s.Max(): 10000 != %v\n", max)
	}
	if mean := s.Mean(); 4965.98 != mean {
		t.Errorf("s.Mean(): 4965.98 != %v\n", mean)
	}
	if stdDev := s.StdDev(); 2959.825156930727 != stdDev {
		t.Errorf("s.StdDev(): 2959.825156930727 != %v\n", stdDev)
	}
	ps := s.Percentiles([]float64{0.5, 0.75, 0.99})
	if 4615 != ps[0] {
		t.Errorf("median: 4615 != %v\n", ps[0])
	}
	if 7672 != ps[1] {
		t.Errorf("75th percentile: 7672 != %v\n", ps[1])
	}
	if 9998.99 != ps[2] {
		t.Errorf("99th percentile: 9998.99 != %v\n", ps[2])
	}
}

func testUniformSampleStatistics(t *testing.T, s Sample) {
	if count := s.Count(); 10000 != count {
		t.Errorf("s.Count(): 10000 != %v\n", count)
	}
	if min := s.Min(); 37 != min {
		t.Errorf("s.Min(): 37 != %v\n", min)
	}
	if max := s.Max(); 9989 != max {
		t.Errorf("s.Max(): 9989 != %v\n", max)
	}
	if mean := s.Mean(); 4748.14 != mean {
		t.Errorf("s.Mean(): 4748.14 != %v\n", mean)
	}
	if stdDev := s.StdDev(); 2826.684117548333 != stdDev {
		t.Errorf("s.StdDev(): 2826.684117548333 != %v\n", stdDev)
	}
	ps := s.Percentiles([]float64{0.5, 0.75, 0.99})
	if 4599 != ps[0] {
		t.Errorf("median: 4599 != %v\n", ps[0])
	}
	if 7380.5 != ps[1] {
		t.Errorf("75th percentile: 7380.5 != %v\n", ps[1])
	}
	if math.Abs(9986.429999999998-ps[2]) > epsilonPercentile {
		t.Errorf("99th percentile: 9986.429999999998 != %v\n", ps[2])
	}
}

// TestUniformSampleConcurrentUpdateCount would expose data race problems with
// concurrent Update and Count calls on a Sample when the test is run with the
// -race flag.
func TestUniformSampleConcurrentUpdateCount(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping in short mode")
	}
	s := NewUniformSample(100)
	for i := 0; i < 100; i++ {
		s.Update(int64(i))
	}
	quit := make(chan struct{})
	go func() {
		ticker := time.NewTicker(10 * time.Millisecond)
		for {
			select {
			case <-ticker.C:
				s.Update(rand.Int63())
			case <-quit:
				ticker.Stop()
				return
			}
		}
	}()
	for i := 0; i < 1000; i++ {
		s.Count()
		time.Sleep(5 * time.Millisecond)
	}
	quit <- struct{}{}
}
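
// TestUniformSampleConcurrentUpdateValuesSketch is an illustrative companion
// to the test above, a sketch using only the Sample interface methods already
// exercised in this file: the same ticker/quit pattern is pointed at
// concurrent Update and Values calls, and is likewise intended to be run with
// the -race flag.
func TestUniformSampleConcurrentUpdateValuesSketch(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping in short mode")
	}
	s := NewUniformSample(100)
	for i := 0; i < 100; i++ {
		s.Update(int64(i))
	}
	quit := make(chan struct{})
	go func() {
		ticker := time.NewTicker(10 * time.Millisecond)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				s.Update(rand.Int63())
			case <-quit:
				return
			}
		}
	}()
	for i := 0; i < 100; i++ {
		s.Values()
		time.Sleep(5 * time.Millisecond)
	}
	close(quit)
}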