github.com/ethereum/go-ethereum@v1.16.1/metrics/sample_test.go

package metrics

import (
	"math"
	"math/rand"
	"testing"
	"time"
)

const epsilonPercentile = .00000000001

// Benchmark{Compute,Copy}{1000,1000000} demonstrate that, even for relatively
// expensive computations like Variance, the cost of copying the Sample, as
// approximated by a make and copy, is much greater than the cost of the
// computation for small samples and only slightly less for large samples.
func BenchmarkCompute1000(b *testing.B) {
	s := make([]int64, 1000)
	var sum int64
	for i := 0; i < len(s); i++ {
		s[i] = int64(i)
		sum += int64(i)
	}
	mean := float64(sum) / float64(len(s))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		SampleVariance(mean, s)
	}
}

func BenchmarkCompute1000000(b *testing.B) {
	s := make([]int64, 1000000)
	var sum int64
	for i := 0; i < len(s); i++ {
		s[i] = int64(i)
		sum += int64(i)
	}
	mean := float64(sum) / float64(len(s))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		SampleVariance(mean, s)
	}
}
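// The comment above also refers to Benchmark{Copy}{1000,1000000}, which are
// not part of this listing. The benchmark below is an illustrative sketch of
// what such a copy benchmark could look like (the name
// BenchmarkCopySketch1000000 is hypothetical, not from the original file); it
// approximates the cost of snapshotting a Sample with a make and copy, as the
// comment describes.
func BenchmarkCopySketch1000000(b *testing.B) {
	s := make([]int64, 1000000)
	for i := 0; i < len(s); i++ {
		s[i] = int64(i)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		// Allocate a fresh slice and copy the values into it, mimicking the
		// work a snapshot of the sample's values would do.
		sCopy := make([]int64, len(s))
		copy(sCopy, s)
	}
}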
func BenchmarkExpDecaySample257(b *testing.B) {
	benchmarkSample(b, NewExpDecaySample(257, 0.015))
}

func BenchmarkExpDecaySample514(b *testing.B) {
	benchmarkSample(b, NewExpDecaySample(514, 0.015))
}

func BenchmarkExpDecaySample1028(b *testing.B) {
	benchmarkSample(b, NewExpDecaySample(1028, 0.015))
}

func BenchmarkUniformSample257(b *testing.B) {
	benchmarkSample(b, NewUniformSample(257))
}

func BenchmarkUniformSample514(b *testing.B) {
	benchmarkSample(b, NewUniformSample(514))
}

func BenchmarkUniformSample1028(b *testing.B) {
	benchmarkSample(b, NewUniformSample(1028))
}

func TestExpDecaySample(t *testing.T) {
	for _, tc := range []struct {
		reservoirSize int
		alpha         float64
		updates       int
	}{
		{100, 0.99, 10},
		{1000, 0.01, 100},
		{100, 0.99, 1000},
	} {
		sample := NewExpDecaySample(tc.reservoirSize, tc.alpha)
		for i := 0; i < tc.updates; i++ {
			sample.Update(int64(i))
		}
		snap := sample.Snapshot()
		if have, want := int(snap.Count()), tc.updates; have != want {
			t.Errorf("unexpected count: have %d want %d", have, want)
		}
		if have, want := snap.Size(), min(tc.updates, tc.reservoirSize); have != want {
			t.Errorf("unexpected size: have %d want %d", have, want)
		}
		values := snap.values
		if have, want := len(values), min(tc.updates, tc.reservoirSize); have != want {
			t.Errorf("unexpected values length: have %d want %d", have, want)
		}
		for _, v := range values {
			if v > int64(tc.updates) || v < 0 {
				t.Errorf("out of range [0, %d]: %v", tc.updates, v)
			}
		}
	}
}

// This test makes sure that the sample's priority is not amplified by using
// nanosecond duration since start rather than second duration since start.
// The priority becomes +Inf quickly after starting if this is done,
// effectively freezing the set of samples until a rescale step happens.
func TestExpDecaySampleNanosecondRegression(t *testing.T) {
	sw := NewExpDecaySample(1000, 0.99)
	for i := 0; i < 1000; i++ {
		sw.Update(10)
	}
	time.Sleep(1 * time.Millisecond)
	for i := 0; i < 1000; i++ {
		sw.Update(20)
	}
	v := sw.Snapshot().values
	avg := float64(0)
	for i := 0; i < len(v); i++ {
		avg += float64(v[i])
	}
	avg /= float64(len(v))
	if avg > 16 || avg < 14 {
		t.Errorf("out of range [14, 16]: %v\n", avg)
	}
}
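// The regression test above guards against computing the forward-decay
// priority from nanoseconds rather than seconds. Assuming a priority of the
// form exp(alpha*t)/u, as in forward-decaying reservoir sampling (the uniform
// divisor u is omitted here because it does not affect overflow), the sketch
// below illustrates the failure mode: with t measured in nanoseconds the
// exponent overflows float64 and the priority becomes +Inf. This test is an
// illustrative addition, not part of the original file.
func TestExpDecayPriorityOverflowSketch(t *testing.T) {
	const alpha = 0.99
	elapsed := time.Second // one second since the landmark time

	fromSeconds := math.Exp(alpha * elapsed.Seconds())
	fromNanos := math.Exp(alpha * float64(elapsed.Nanoseconds()))

	if math.IsInf(fromSeconds, 1) {
		t.Errorf("priority from seconds unexpectedly overflowed: %v", fromSeconds)
	}
	if !math.IsInf(fromNanos, 1) {
		t.Errorf("priority from nanoseconds should overflow to +Inf, got %v", fromNanos)
	}
}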
func TestExpDecaySampleRescale(t *testing.T) {
	s := NewExpDecaySample(2, 0.001).(*ExpDecaySample)
	s.update(time.Now(), 1)
	s.update(time.Now().Add(time.Hour+time.Microsecond), 1)
	for _, v := range s.values.Values() {
		if v.k == 0.0 {
			t.Fatal("v.k == 0.0")
		}
	}
}

func TestExpDecaySampleSnapshot(t *testing.T) {
	now := time.Now()
	s := NewExpDecaySample(100, 0.99).(*ExpDecaySample).SetRand(rand.New(rand.NewSource(1)))
	for i := 1; i <= 10000; i++ {
		s.(*ExpDecaySample).update(now.Add(time.Duration(i)), int64(i))
	}
	snapshot := s.Snapshot()
	s.Update(1)
	testExpDecaySampleStatistics(t, snapshot)
}

func TestExpDecaySampleStatistics(t *testing.T) {
	now := time.Now()
	s := NewExpDecaySample(100, 0.99).(*ExpDecaySample).SetRand(rand.New(rand.NewSource(1)))
	for i := 1; i <= 10000; i++ {
		s.(*ExpDecaySample).update(now.Add(time.Duration(i)), int64(i))
	}
	testExpDecaySampleStatistics(t, s.Snapshot())
}

func TestUniformSample(t *testing.T) {
	sw := NewUniformSample(100)
	for i := 0; i < 1000; i++ {
		sw.Update(int64(i))
	}
	s := sw.Snapshot()
	if size := s.Count(); size != 1000 {
		t.Errorf("s.Count(): 1000 != %v\n", size)
	}
	if size := s.Size(); size != 100 {
		t.Errorf("s.Size(): 100 != %v\n", size)
	}
	values := s.values

	if l := len(values); l != 100 {
		t.Errorf("len(s.Values()): 100 != %v\n", l)
	}
	for _, v := range values {
		if v > 1000 || v < 0 {
			t.Errorf("out of range [0, 1000]: %v\n", v)
		}
	}
}

func TestUniformSampleIncludesTail(t *testing.T) {
	sw := NewUniformSample(100)
	max := 100
	for i := 0; i < max; i++ {
		sw.Update(int64(i))
	}
	v := sw.Snapshot().values
	sum := 0
	exp := (max - 1) * max / 2
	for i := 0; i < len(v); i++ {
		sum += int(v[i])
	}
	if exp != sum {
		t.Errorf("sum: %v != %v\n", exp, sum)
	}
}

func TestUniformSampleSnapshot(t *testing.T) {
	s := NewUniformSample(100).(*UniformSample).SetRand(rand.New(rand.NewSource(1)))
	for i := 1; i <= 10000; i++ {
		s.Update(int64(i))
	}
	snapshot := s.Snapshot()
	s.Update(1)
	testUniformSampleStatistics(t, snapshot)
}

func TestUniformSampleStatistics(t *testing.T) {
	s := NewUniformSample(100).(*UniformSample).SetRand(rand.New(rand.NewSource(1)))
	for i := 1; i <= 10000; i++ {
		s.Update(int64(i))
	}
	testUniformSampleStatistics(t, s.Snapshot())
}

func benchmarkSample(b *testing.B, s Sample) {
	for i := 0; i < b.N; i++ {
		s.Update(1)
	}
}

func testExpDecaySampleStatistics(t *testing.T, s *sampleSnapshot) {
	if sum := s.Sum(); sum != 496598 {
		t.Errorf("s.Sum(): 496598 != %v\n", sum)
	}
	if count := s.Count(); count != 10000 {
		t.Errorf("s.Count(): 10000 != %v\n", count)
	}
	if min := s.Min(); min != 107 {
		t.Errorf("s.Min(): 107 != %v\n", min)
	}
	if max := s.Max(); max != 10000 {
		t.Errorf("s.Max(): 10000 != %v\n", max)
	}
	if mean := s.Mean(); mean != 4965.98 {
		t.Errorf("s.Mean(): 4965.98 != %v\n", mean)
	}
	if stdDev := s.StdDev(); stdDev != 2959.825156930727 {
		t.Errorf("s.StdDev(): 2959.825156930727 != %v\n", stdDev)
	}
	ps := s.Percentiles([]float64{0.5, 0.75, 0.99})
	if ps[0] != 4615 {
		t.Errorf("median: 4615 != %v\n", ps[0])
	}
	if ps[1] != 7672 {
		t.Errorf("75th percentile: 7672 != %v\n", ps[1])
	}
	if ps[2] != 9998.99 {
		t.Errorf("99th percentile: 9998.99 != %v\n", ps[2])
	}
}

func testUniformSampleStatistics(t *testing.T, s *sampleSnapshot) {
	if count := s.Count(); count != 10000 {
		t.Errorf("s.Count(): 10000 != %v\n", count)
	}
	if min := s.Min(); min != 37 {
		t.Errorf("s.Min(): 37 != %v\n", min)
	}
	if max := s.Max(); max != 9989 {
		t.Errorf("s.Max(): 9989 != %v\n", max)
	}
	if mean := s.Mean(); mean != 4748.14 {
		t.Errorf("s.Mean(): 4748.14 != %v\n", mean)
	}
	if stdDev := s.StdDev(); stdDev != 2826.684117548333 {
		t.Errorf("s.StdDev(): 2826.684117548333 != %v\n", stdDev)
	}
	ps := s.Percentiles([]float64{0.5, 0.75, 0.99})
	if ps[0] != 4599 {
		t.Errorf("median: 4599 != %v\n", ps[0])
	}
	if ps[1] != 7380.5 {
		t.Errorf("75th percentile: 7380.5 != %v\n", ps[1])
	}
	if math.Abs(9986.429999999998-ps[2]) > epsilonPercentile {
		t.Errorf("99th percentile: 9986.429999999998 != %v\n", ps[2])
	}
}

// TestUniformSampleConcurrentUpdateCount would expose data race problems with
// concurrent Update and Count calls on Sample when test is called with -race
// argument
func TestUniformSampleConcurrentUpdateCount(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping in short mode")
	}
	s := NewUniformSample(100)
	for i := 0; i < 100; i++ {
		s.Update(int64(i))
	}
	quit := make(chan struct{})
	go func() {
		t := time.NewTicker(10 * time.Millisecond)
		defer t.Stop()
		for {
			select {
			case <-t.C:
				s.Update(rand.Int63())
			case <-quit:
				t.Stop()
				return
			}
		}
	}()
	for i := 0; i < 1000; i++ {
		s.Snapshot().Count()
		time.Sleep(5 * time.Millisecond)
	}
	quit <- struct{}{}
}

func BenchmarkCalculatePercentiles(b *testing.B) {
	pss := []float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999}
	var vals []int64
	for i := 0; i < 1000; i++ {
		vals = append(vals, int64(rand.Int31()))
	}
	v := make([]int64, len(vals))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		copy(v, vals)
		_ = CalculatePercentiles(v, pss)
	}
}
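// The fractional expectations in the statistics helpers above (for example
// 9998.99 and 9986.429999999998) are consistent with linear interpolation
// between neighbouring sorted values. The sketch below reproduces that rule on
// a tiny, locally constructed input; it assumes the common pos = p*(n+1)
// interpolation convention and makes no claim about the exact behaviour of
// CalculatePercentiles. The test name is hypothetical, not from the original
// file.
func TestPercentileInterpolationSketch(t *testing.T) {
	sorted := make([]float64, 100)
	for i := range sorted {
		sorted[i] = float64(i + 1) // 1..100, already sorted
	}
	p := 0.99
	pos := p * float64(len(sorted)+1) // 99.99
	lower := sorted[int(pos)-1]       // 99
	upper := sorted[int(pos)]         // 100
	got := lower + (pos-math.Floor(pos))*(upper-lower)
	if math.Abs(got-99.99) > epsilonPercentile {
		t.Errorf("interpolated 99th percentile: 99.99 != %v\n", got)
	}
}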