github.com/jgbaldwinbrown/perf@v0.1.1/benchstat/table.go (about)

     1  // Copyright 2017 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package benchstat
     6  
     7  import (
     8  	"fmt"
     9  	"strings"
    10  
    11  	"golang.org/x/perf/internal/stats"
    12  )
    13  
// A Table is a table for display in the benchstat output.
type Table struct {
	Metric      string   // metric name derived from the unit via metricOf (e.g. "time/op")
	OldNewDelta bool     // is this an old-new-delta table? (true iff exactly two configs)
	Configs     []string // configuration names, one per column of metrics
	Groups      []string // benchmark group names included in this table
	Rows        []*Row   // rows of the table, one per benchmark (plus optional geomean row)
}
    22  
// A Row is a table row for display in the benchstat output.
type Row struct {
	Benchmark string     // benchmark name
	Group     string     // group name; set only when more than one group is present
	Scaler    Scaler     // formatter for stats means
	Metrics   []*Metrics // columns of statistics, one entry per config
	PctDelta  float64    // unformatted percent change (0 when not significant)
	Delta     string     // formatted percent change ("~" when not significant)
	Note      string     // additional information (test error or p-value/sample sizes)
	Change    int        // +1 better, -1 worse, 0 unchanged
}
    34  
    35  // Tables returns tables comparing the benchmarks in the collection.
    36  func (c *Collection) Tables() []*Table {
    37  	deltaTest := c.DeltaTest
    38  	if deltaTest == nil {
    39  		deltaTest = UTest
    40  	}
    41  	alpha := c.Alpha
    42  	if alpha == 0 {
    43  		alpha = 0.05
    44  	}
    45  
    46  	// Update statistics.
    47  	for _, m := range c.Metrics {
    48  		m.computeStats()
    49  	}
    50  
    51  	var tables []*Table
    52  	key := Key{}
    53  	for _, key.Unit = range c.Units {
    54  		table := new(Table)
    55  		table.Configs = c.Configs
    56  		table.Groups = c.Groups
    57  		table.Metric = metricOf(key.Unit)
    58  		table.OldNewDelta = len(c.Configs) == 2
    59  		for _, key.Group = range c.Groups {
    60  			for _, key.Benchmark = range c.Benchmarks[key.Group] {
    61  				row := &Row{Benchmark: key.Benchmark}
    62  				if len(c.Groups) > 1 {
    63  					// Show group headers if there is more than one group.
    64  					row.Group = key.Group
    65  				}
    66  
    67  				for _, key.Config = range c.Configs {
    68  					m := c.Metrics[key]
    69  					if m == nil {
    70  						row.Metrics = append(row.Metrics, new(Metrics))
    71  						continue
    72  					}
    73  					row.Metrics = append(row.Metrics, m)
    74  					if row.Scaler == nil {
    75  						row.Scaler = NewScaler(m.Mean, m.Unit)
    76  					}
    77  				}
    78  
    79  				// If there are only two configs being compared, add stats.
    80  				if table.OldNewDelta {
    81  					k0 := key
    82  					k0.Config = c.Configs[0]
    83  					k1 := key
    84  					k1.Config = c.Configs[1]
    85  					old := c.Metrics[k0]
    86  					new := c.Metrics[k1]
    87  					// If one is missing, omit row entirely.
    88  					// TODO: Control this better.
    89  					if old == nil || new == nil {
    90  						continue
    91  					}
    92  					pval, testerr := deltaTest(old, new)
    93  					row.PctDelta = 0.00
    94  					row.Delta = "~"
    95  					if testerr == stats.ErrZeroVariance {
    96  						row.Note = "(zero variance)"
    97  					} else if testerr == stats.ErrSampleSize {
    98  						row.Note = "(too few samples)"
    99  					} else if testerr == stats.ErrSamplesEqual {
   100  						row.Note = "(all equal)"
   101  					} else if testerr != nil {
   102  						row.Note = fmt.Sprintf("(%s)", testerr)
   103  					} else if pval < alpha {
   104  						if new.Mean == old.Mean {
   105  							row.Delta = "0.00%"
   106  						} else {
   107  							pct := ((new.Mean / old.Mean) - 1.0) * 100.0
   108  							row.PctDelta = pct
   109  							row.Delta = fmt.Sprintf("%+.2f%%", pct)
   110  							if pct < 0 == (table.Metric != "speed") { // smaller is better, except speeds
   111  								row.Change = +1
   112  							} else {
   113  								row.Change = -1
   114  							}
   115  						}
   116  					}
   117  					if row.Note == "" && pval != -1 {
   118  						row.Note = fmt.Sprintf("(p=%0.3f n=%d+%d)", pval, len(old.RValues), len(new.RValues))
   119  					}
   120  				}
   121  
   122  				table.Rows = append(table.Rows, row)
   123  			}
   124  		}
   125  
   126  		if len(table.Rows) > 0 {
   127  			if c.Order != nil {
   128  				Sort(table, c.Order)
   129  			}
   130  			if c.AddGeoMean {
   131  				addGeomean(c, table, key.Unit, table.OldNewDelta)
   132  			}
   133  			tables = append(tables, table)
   134  		}
   135  	}
   136  
   137  	return tables
   138  }
   139  
   140  var metricSuffix = map[string]string{
   141  	"ns/op": "time/op",
   142  	"ns/GC": "time/GC",
   143  	"B/op":  "alloc/op",
   144  	"MB/s":  "speed",
   145  }
   146  
   147  // metricOf returns the name of the metric with the given unit.
   148  func metricOf(unit string) string {
   149  	if s := metricSuffix[unit]; s != "" {
   150  		return s
   151  	}
   152  	for s, suff := range metricSuffix {
   153  		if dashs := "-" + s; strings.HasSuffix(unit, dashs) {
   154  			prefix := strings.TrimSuffix(unit, dashs)
   155  			return prefix + "-" + suff
   156  		}
   157  	}
   158  	return unit
   159  }
   160  
   161  // addGeomean adds a "geomean" row to the table,
   162  // showing the geometric mean of all the benchmarks.
   163  func addGeomean(c *Collection, t *Table, unit string, delta bool) {
   164  	row := &Row{Benchmark: "[Geo mean]"}
   165  	key := Key{Unit: unit}
   166  	geomeans := []float64{}
   167  	maxCount := 0
   168  	for _, key.Config = range c.Configs {
   169  		var means []float64
   170  		for _, key.Group = range c.Groups {
   171  			for _, key.Benchmark = range c.Benchmarks[key.Group] {
   172  				m := c.Metrics[key]
   173  				// Omit 0 values from the geomean calculation,
   174  				// as these either make the geomean undefined
   175  				// or zero (depending on who you ask). This
   176  				// typically comes up with things like
   177  				// allocation counts, where it's fine to just
   178  				// ignore the benchmark.
   179  				if m != nil && m.Mean != 0 {
   180  					means = append(means, m.Mean)
   181  				}
   182  			}
   183  		}
   184  		if len(means) > maxCount {
   185  			maxCount = len(means)
   186  		}
   187  		if len(means) == 0 {
   188  			row.Metrics = append(row.Metrics, new(Metrics))
   189  			delta = false
   190  		} else {
   191  			geomean := stats.GeoMean(means)
   192  			geomeans = append(geomeans, geomean)
   193  			if row.Scaler == nil {
   194  				row.Scaler = NewScaler(geomean, unit)
   195  			}
   196  			row.Metrics = append(row.Metrics, &Metrics{
   197  				Unit: unit,
   198  				Mean: geomean,
   199  			})
   200  		}
   201  	}
   202  	if maxCount <= 1 {
   203  		// Only one benchmark contributed to this geomean.
   204  		// Since the geomean is the same as the benchmark
   205  		// result, don't bother outputting it.
   206  		return
   207  	}
   208  	if delta {
   209  		pct := ((geomeans[1] / geomeans[0]) - 1.0) * 100.0
   210  		row.PctDelta = pct
   211  		row.Delta = fmt.Sprintf("%+.2f%%", pct)
   212  	}
   213  	t.Rows = append(t.Rows, row)
   214  }