github.com/tcnksm/go@v0.0.0-20141208075154-439b32936367/src/testing/benchmark.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package testing

import (
	"flag"
	"fmt"
	"os"
	"runtime"
	"sync"
	"sync/atomic"
	"time"
)

var matchBenchmarks = flag.String("test.bench", "", "regular expression to select benchmarks to run")
var benchTime = flag.Duration("test.benchtime", 1*time.Second, "approximate run time for each benchmark")
var benchmarkMemory = flag.Bool("test.benchmem", false, "print memory allocations for benchmarks")

// Global lock to ensure only one benchmark runs at a time.
var benchmarkLock sync.Mutex

// Used for every benchmark for measuring memory.
var memStats runtime.MemStats

// An internal type but exported because it is cross-package; part of the implementation
// of the "go test" command.
type InternalBenchmark struct {
	Name string
	F    func(b *B)
}

// B is a type passed to Benchmark functions to manage benchmark
// timing and to specify the number of iterations to run.
type B struct {
	common
	N                int
	previousN        int           // number of iterations in the previous run
	previousDuration time.Duration // total duration of the previous run
	benchmark        InternalBenchmark
	bytes            int64
	timerOn          bool
	showAllocResult  bool
	result           BenchmarkResult
	parallelism      int // RunParallel creates parallelism*GOMAXPROCS goroutines
	// The initial states of memStats.Mallocs and memStats.TotalAlloc.
	startAllocs uint64
	startBytes  uint64
	// The net total of this test after being run.
	netAllocs uint64
	netBytes  uint64
}

// StartTimer starts timing a test. This function is called automatically
// before a benchmark starts, but it can also be used to resume timing after
// a call to StopTimer.
func (b *B) StartTimer() {
	if !b.timerOn {
		runtime.ReadMemStats(&memStats)
		b.startAllocs = memStats.Mallocs
		b.startBytes = memStats.TotalAlloc
		b.start = time.Now()
		b.timerOn = true
	}
}

// StopTimer stops timing a test. This can be used to pause the timer
// while performing complex initialization that you don't
// want to measure.
func (b *B) StopTimer() {
	if b.timerOn {
		b.duration += time.Now().Sub(b.start)
		runtime.ReadMemStats(&memStats)
		b.netAllocs += memStats.Mallocs - b.startAllocs
		b.netBytes += memStats.TotalAlloc - b.startBytes
		b.timerOn = false
	}
}

// ResetTimer zeros the elapsed benchmark time and memory allocation counters.
// It does not affect whether the timer is running.
func (b *B) ResetTimer() {
	if b.timerOn {
		runtime.ReadMemStats(&memStats)
		b.startAllocs = memStats.Mallocs
		b.startBytes = memStats.TotalAlloc
		b.start = time.Now()
	}
	b.duration = 0
	b.netAllocs = 0
	b.netBytes = 0
}

// SetBytes records the number of bytes processed in a single operation.
// If this is called, the benchmark will report ns/op and MB/s.
func (b *B) SetBytes(n int64) { b.bytes = n }

// ReportAllocs enables malloc statistics for this benchmark.
// It is equivalent to setting -test.benchmem, but it only affects the
// benchmark function that calls ReportAllocs.
func (b *B) ReportAllocs() {
	b.showAllocResult = true
}
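
// Illustrative sketch (not part of this file): the timer and allocation
// controls above are typically combined like this inside a benchmark body.
// BenchmarkDecode, loadTestData, and decode are hypothetical names chosen
// for the example.
//
//	func BenchmarkDecode(b *testing.B) {
//		data := loadTestData() // expensive setup, excluded from timing
//		b.ReportAllocs()
//		b.SetBytes(int64(len(data)))
//		b.ResetTimer() // discard setup time and allocations
//		for i := 0; i < b.N; i++ {
//			decode(data)
//		}
//	}
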
func (b *B) nsPerOp() int64 {
	if b.N <= 0 {
		return 0
	}
	return b.duration.Nanoseconds() / int64(b.N)
}

// runN runs a single benchmark for the specified number of iterations.
func (b *B) runN(n int) {
	benchmarkLock.Lock()
	defer benchmarkLock.Unlock()
	// Try to get a comparable environment for each run
	// by clearing garbage from previous runs.
	runtime.GC()
	b.N = n
	b.parallelism = 1
	b.ResetTimer()
	b.StartTimer()
	b.benchmark.F(b)
	b.StopTimer()
	b.previousN = n
	b.previousDuration = b.duration
}

func min(x, y int) int {
	if x > y {
		return y
	}
	return x
}

func max(x, y int) int {
	if x < y {
		return y
	}
	return x
}

// roundDown10 rounds a number down to the nearest power of 10.
func roundDown10(n int) int {
	var tens = 0
	// tens = floor(log_10(n))
	for n >= 10 {
		n = n / 10
		tens++
	}
	// result = 10^tens
	result := 1
	for i := 0; i < tens; i++ {
		result *= 10
	}
	return result
}

// roundUp rounds x up to a number of the form [1eX, 2eX, 3eX, 5eX].
func roundUp(n int) int {
	base := roundDown10(n)
	switch {
	case n <= base:
		return base
	case n <= (2 * base):
		return 2 * base
	case n <= (3 * base):
		return 3 * base
	case n <= (5 * base):
		return 5 * base
	default:
		return 10 * base
	}
}

// run times the benchmark function in a separate goroutine.
func (b *B) run() BenchmarkResult {
	go b.launch()
	<-b.signal
	return b.result
}

// launch launches the benchmark function. It gradually increases the number
// of benchmark iterations until the benchmark runs for the requested benchtime.
// It prints timing information in this form
//		testing.BenchmarkHello	100000	19 ns/op
// launch is run by the run function as a separate goroutine.
func (b *B) launch() {
	// Run the benchmark for a single iteration in case it's expensive.
	n := 1

	// Signal that we're done whether we return normally
	// or by FailNow's runtime.Goexit.
	defer func() {
		b.signal <- b
	}()

	b.runN(n)
	// Run the benchmark for at least the specified amount of time.
	d := *benchTime
	for !b.failed && b.duration < d && n < 1e9 {
		last := n
		// Predict required iterations.
		if b.nsPerOp() == 0 {
			n = 1e9
		} else {
			n = int(d.Nanoseconds() / b.nsPerOp())
		}
		// Run more iterations than we think we'll need (1.2x).
		// Don't grow too fast in case we had timing errors previously.
		// Be sure to run at least one more than last time.
		n = max(min(n+n/5, 100*last), last+1)
		// Round up to something easy to read.
		n = roundUp(n)
		b.runN(n)
	}
	b.result = BenchmarkResult{b.N, b.duration, b.bytes, b.netAllocs, b.netBytes}
}
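
// Worked example of the growth schedule above (illustrative numbers only,
// assuming the default 1s benchtime): if the first run of n=1 measures
// 19 ns/op, the raw prediction is 1e9/19 ≈ 5.3e7 iterations, but the
// 100*last cap keeps early growth to 1, 100, 10000, 1000000, ...; once the
// cap exceeds the 1.2x-padded prediction, roundUp lifts that prediction to
// the next 1eX/2eX/3eX/5eX value (here 1e8), and the loop stops as soon as
// the accumulated duration reaches benchtime or n reaches 1e9.
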
// The results of a benchmark run.
type BenchmarkResult struct {
	N         int           // The number of iterations.
	T         time.Duration // The total time taken.
	Bytes     int64         // Bytes processed in one iteration.
	MemAllocs uint64        // The total number of memory allocations.
	MemBytes  uint64        // The total number of bytes allocated.
}

// NsPerOp returns the average wall-clock time per iteration, in nanoseconds.
func (r BenchmarkResult) NsPerOp() int64 {
	if r.N <= 0 {
		return 0
	}
	return r.T.Nanoseconds() / int64(r.N)
}

// mbPerSec returns the throughput in MB/s, or 0 if no bytes, time, or
// iterations were recorded.
func (r BenchmarkResult) mbPerSec() float64 {
	if r.Bytes <= 0 || r.T <= 0 || r.N <= 0 {
		return 0
	}
	return (float64(r.Bytes) * float64(r.N) / 1e6) / r.T.Seconds()
}

// AllocsPerOp returns the average number of allocations per iteration.
func (r BenchmarkResult) AllocsPerOp() int64 {
	if r.N <= 0 {
		return 0
	}
	return int64(r.MemAllocs) / int64(r.N)
}

// AllocedBytesPerOp returns the average number of bytes allocated per iteration.
func (r BenchmarkResult) AllocedBytesPerOp() int64 {
	if r.N <= 0 {
		return 0
	}
	return int64(r.MemBytes) / int64(r.N)
}

// String formats the result in the standard benchmark output form:
// iteration count, ns/op, and MB/s when SetBytes was used.
func (r BenchmarkResult) String() string {
	mbs := r.mbPerSec()
	mb := ""
	if mbs != 0 {
		mb = fmt.Sprintf("\t%7.2f MB/s", mbs)
	}
	nsop := r.NsPerOp()
	ns := fmt.Sprintf("%10d ns/op", nsop)
	if r.N > 0 && nsop < 100 {
		// The format specifiers here make sure that
		// the ones digits line up for all three possible formats.
		if nsop < 10 {
			ns = fmt.Sprintf("%13.2f ns/op", float64(r.T.Nanoseconds())/float64(r.N))
		} else {
			ns = fmt.Sprintf("%12.1f ns/op", float64(r.T.Nanoseconds())/float64(r.N))
		}
	}
	return fmt.Sprintf("%8d\t%s%s", r.N, ns, mb)
}

// MemString formats the allocation statistics as B/op and allocs/op.
func (r BenchmarkResult) MemString() string {
	return fmt.Sprintf("%8d B/op\t%8d allocs/op",
		r.AllocedBytesPerOp(), r.AllocsPerOp())
}
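
// Illustrative sketch (not part of this file): a BenchmarkResult is commonly
// obtained via the exported Benchmark function defined below and printed with
// String and MemString. The fmt.Sprintf workload is an arbitrary example.
//
//	r := testing.Benchmark(func(b *testing.B) {
//		for i := 0; i < b.N; i++ {
//			fmt.Sprintf("%d", i)
//		}
//	})
//	fmt.Println(r.String(), r.MemString())
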
// An internal function but exported because it is cross-package; part of the implementation
// of the "go test" command.
func RunBenchmarks(matchString func(pat, str string) (bool, error), benchmarks []InternalBenchmark) {
	// If no flag was specified, don't run benchmarks.
	if len(*matchBenchmarks) == 0 {
		return
	}
	for _, Benchmark := range benchmarks {
		matched, err := matchString(*matchBenchmarks, Benchmark.Name)
		if err != nil {
			fmt.Fprintf(os.Stderr, "testing: invalid regexp for -test.bench: %s\n", err)
			os.Exit(1)
		}
		if !matched {
			continue
		}
		for _, procs := range cpuList {
			runtime.GOMAXPROCS(procs)
			b := &B{
				common: common{
					signal: make(chan interface{}),
				},
				benchmark: Benchmark,
			}
			benchName := Benchmark.Name
			if procs != 1 {
				benchName = fmt.Sprintf("%s-%d", Benchmark.Name, procs)
			}
			fmt.Printf("%s\t", benchName)
			r := b.run()
			if b.failed {
				// The output could be very long here, but probably isn't.
				// We print it all, regardless, because we don't want to trim the reason
				// the benchmark failed.
				fmt.Printf("--- FAIL: %s\n%s", benchName, b.output)
				continue
			}
			results := r.String()
			if *benchmarkMemory || b.showAllocResult {
				results += "\t" + r.MemString()
			}
			fmt.Println(results)
			// Unlike with tests, we ignore the -chatty flag and always print output for
			// benchmarks since the output generation time will skew the results.
			if len(b.output) > 0 {
				b.trimOutput()
				fmt.Printf("--- BENCH: %s\n%s", benchName, b.output)
			}
			if p := runtime.GOMAXPROCS(-1); p != procs {
				fmt.Fprintf(os.Stderr, "testing: %s left GOMAXPROCS set to %d\n", benchName, p)
			}
		}
	}
}

// trimOutput shortens the output from a benchmark, which can be very long.
func (b *B) trimOutput() {
	// The output is likely to appear multiple times because the benchmark
	// is run multiple times, but at least it will be seen. This is not a big deal
	// because benchmarks rarely print, but just in case, we trim it if it's too long.
	const maxNewlines = 10
	for nlCount, j := 0, 0; j < len(b.output); j++ {
		if b.output[j] == '\n' {
			nlCount++
			if nlCount >= maxNewlines {
				b.output = append(b.output[:j], "\n\t... [output truncated]\n"...)
				break
			}
		}
	}
}

// A PB is used by RunParallel for running parallel benchmarks.
type PB struct {
	globalN *uint64 // iteration counter shared between all worker goroutines
	grain   uint64  // acquire that many iterations from globalN at once
	cache   uint64  // local cache of acquired iterations
	bN      uint64  // total number of iterations to execute (b.N)
}

// Next reports whether there are more iterations to execute.
func (pb *PB) Next() bool {
	if pb.cache == 0 {
		n := atomic.AddUint64(pb.globalN, pb.grain)
		if n <= pb.bN {
			pb.cache = pb.grain
		} else if n < pb.bN+pb.grain {
			pb.cache = pb.bN + pb.grain - n
		} else {
			return false
		}
	}
	pb.cache--
	return true
}

// RunParallel runs a benchmark in parallel.
// It creates multiple goroutines and distributes b.N iterations among them.
// The number of goroutines defaults to GOMAXPROCS. To increase parallelism for
// non-CPU-bound benchmarks, call SetParallelism before RunParallel.
// RunParallel is usually used with the go test -cpu flag.
//
// The body function will be run in each goroutine. It should set up any
// goroutine-local state and then iterate until pb.Next returns false.
// It should not use the StartTimer, StopTimer, or ResetTimer functions,
// because they have global effect.
func (b *B) RunParallel(body func(*PB)) {
	// Calculate grain size as number of iterations that take ~100µs.
	// 100µs is enough to amortize the overhead and provide sufficient
	// dynamic load balancing.
	grain := uint64(0)
	if b.previousN > 0 && b.previousDuration > 0 {
		grain = 1e5 * uint64(b.previousN) / uint64(b.previousDuration)
	}
	if grain < 1 {
		grain = 1
	}
	// We expect the inner loop and function call to take at least 10ns,
	// so do not do more than 100µs/10ns=1e4 iterations.
	if grain > 1e4 {
		grain = 1e4
	}

	n := uint64(0)
	numProcs := b.parallelism * runtime.GOMAXPROCS(0)
	var wg sync.WaitGroup
	wg.Add(numProcs)
	for p := 0; p < numProcs; p++ {
		go func() {
			defer wg.Done()
			pb := &PB{
				globalN: &n,
				grain:   grain,
				bN:      uint64(b.N),
			}
			body(pb)
		}()
	}
	wg.Wait()
	if n <= uint64(b.N) && !b.Failed() {
		b.Fatal("RunParallel: body exited without pb.Next() == false")
	}
}

// SetParallelism sets the number of goroutines used by RunParallel to p*GOMAXPROCS.
// There is usually no need to call SetParallelism for CPU-bound benchmarks.
// If p is less than 1, this call will have no effect.
func (b *B) SetParallelism(p int) {
	if p >= 1 {
		b.parallelism = p
	}
}

// Benchmark benchmarks a single function. Useful for creating
// custom benchmarks that do not use the "go test" command.
func Benchmark(f func(b *B)) BenchmarkResult {
	b := &B{
		common: common{
			signal: make(chan interface{}),
		},
		benchmark: InternalBenchmark{"", f},
	}
	return b.run()
}
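
// Illustrative sketch (not part of this file): a typical RunParallel body sets
// up per-goroutine state and loops on pb.Next. The text/template workload is
// an assumption chosen for illustration.
//
//	func BenchmarkTemplateParallel(b *testing.B) {
//		templ := template.Must(template.New("test").Parse("Hello, {{.}}!"))
//		b.RunParallel(func(pb *testing.PB) {
//			// Each goroutine has its own bytes.Buffer.
//			var buf bytes.Buffer
//			for pb.Next() {
//				buf.Reset()
//				templ.Execute(&buf, "World")
//			}
//		})
//	}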