github.com/cockroachdb/cockroach@v20.2.0-alpha.1+incompatible/pkg/sql/opt/opbench/opbench_test.go

// Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

package opbench_test

import (
	"bytes"
	"context"
	gosql "database/sql"
	"encoding/csv"
	"flag"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"strconv"
	"testing"

	"github.com/cockroachdb/cockroach/pkg/sql/opt/memo"
	"github.com/cockroachdb/cockroach/pkg/sql/opt/opbench"
	"github.com/cockroachdb/cockroach/pkg/sql/opt/testutils/opttester"
	"github.com/cockroachdb/cockroach/pkg/util/timeutil"
	"github.com/cockroachdb/errors"
	// Blank-import a Postgres driver so gosql.Open("postgres", ...) below can
	// find one; lib/pq is assumed here, matching the rest of the repo.
	_ "github.com/lib/pq"
)

var pgurl = flag.String("url", "postgresql://localhost:26257/tpch?sslmode=disable&user=root", "the url to connect to")

const rewriteEstimatedFlag = "opbench-rewrite-estimated"
const rewriteActualFlag = "opbench-rewrite-actual"

var rewriteEstimated = flag.Bool(rewriteEstimatedFlag, false, "re-calculate the estimated costs for each Plan")
var rewriteActual = flag.Bool(rewriteActualFlag, false, "re-measure the runtime for each Plan")

// TODO(justin): we need a system to get consistent re-measurements of all the
// "actual" results. We will probably want some ability to kick off a roachprod
// cluster which will crunch through them all. It's still valuable to be
// able to do individual tests locally for the purposes of iteration.
// TODO(justin): there should be a metadata file that records the setup that
// each measurement was recorded on (what computer, cockroach version, date,
// etc).
// TODO(justin): we should be able to give each test tags, in case you want
// to plot, say, every scan spec, or every hash join spec.
// TODO(justin): at some point we will likely want to record data along more
// dimensions (various sizes of node clusters, number of CPUs, etc).
// TODO(justin): this should run as a roachtest which keeps track of the
// correlation of estimated and actual over time.
// TODO(justin): these tests currently measure the latency of a query, which
// is not entirely correct. We should measure throughput (somehow), but
// latency is easier for now.

// TestOpBench is a hybrid benchmark/test of the cost model. It runs
// parameterized, pre-specified plans and verifies their estimated costs
// against a CSV file.
//
// Similar to the data-driven tests, when the cost model changes the test
// files can be updated automatically by running with the
// -opbench-rewrite-estimated flag.
//
// When run with the -opbench-rewrite-actual flag, it also executes the plans
// against a cluster to measure how long they actually take. The queries are
// run against the URL specified by the -url flag (by default
// localhost:26257).
//
// The resulting CSV files can then be imported into a plotting program to
// inspect the correlation between estimated and actual cost, or the
// correlation can be verified manually.
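//
// For example (a hypothetical invocation, assuming a local cluster with the
// TPCH dataset loaded is listening on localhost:26257):
//
//	go test ./pkg/sql/opt/opbench -run TestOpBench \
//	  -opbench-rewrite-estimated -opbench-rewrite-actual \
//	  -url "postgresql://localhost:26257/tpch?sslmode=disable&user=root"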
func TestOpBench(t *testing.T) {
	rm := runMode{
		rewriteEstimated: *rewriteEstimated,
		rewriteActual:    *rewriteActual,
	}
	for _, spec := range Benches {
		t.Run(spec.Name, func(t *testing.T) {
			runBench(t, spec, fmt.Sprintf("testdata/%s.csv", spec.Name), rm)
		})
	}
}

// measureQuery runs a query against a running Cockroach cluster and returns
// how long it takes to run, in nanoseconds.
func measureQuery(planText string) (int64, error) {
	db, err := gosql.Open("postgres", *pgurl)
	if err != nil {
		return 0, errors.Wrap(err, "can only recompute actual results when pointed at a running Cockroach cluster")
	}

	ctx := context.Background()

	c, err := db.Conn(ctx)
	if err != nil {
		return 0, err
	}

	if _, err := c.ExecContext(ctx, "SET allow_prepare_as_opt_plan = 'on'"); err != nil {
		return 0, err
	}

	// TODO(justin): make this more resilient: give a good error (or
	// auto-import) if TPCH isn't available.
	if _, err := c.ExecContext(ctx, "USE tpch"); err != nil {
		return 0, err
	}

	// Use a 1 minute timeout (statement_timeout is in milliseconds).
	// These benchmarks generally shouldn't take that long anyway.
	if _, err := c.ExecContext(ctx, "SET statement_timeout = 60*1000"); err != nil {
		return 0, err
	}

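	// Install the hand-built plan directly via PREPARE ... AS OPT PLAN, so the
	// timed execution below runs exactly the plan under test rather than
	// whatever plan the optimizer would otherwise pick.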
	text := fmt.Sprintf(`PREPARE p AS OPT PLAN '%s'`, planText)

	_, err = c.ExecContext(ctx, text)
	if err != nil {
		return 0, err
	}

	// TODO(justin): optionally take several measurements and record the
	// mean+stdev.
	start := timeutil.Now().UnixNano()
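	// DISCARD ROWS executes the prepared statement but throws away its result
	// rows, so the measurement reflects execution time rather than the cost of
	// returning results to the client.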
	if _, err := c.ExecContext(ctx, "EXECUTE p DISCARD ROWS"); err != nil {
		return 0, err
	}
	end := timeutil.Now().UnixNano()
	if _, err := c.ExecContext(ctx, "DEALLOCATE p"); err != nil {
		return 0, err
	}

	return end - start, nil
}

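// runMode controls which columns of the CSV runBench regenerates: the
// estimated cost, the measured runtime, or both.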
type runMode struct {
	rewriteEstimated bool
	rewriteActual    bool
}

// param is used to keep track of which parameter exists at which
// index in the CSV file.
type param struct {
	idx  int
	name string
}

// getBlankCSV returns an io.Reader over a CSV containing one row for every
// combination of the Spec's possible input values, with placeholder estimated
// and actual columns, so that the framework can fill in the blanks.
func getBlankCSV(spec *opbench.Spec) io.Reader {
	var out bytes.Buffer
	w := csv.NewWriter(&out)

	inputs := spec.InputNames()

	header := append(append([]string(nil), inputs...), "estimated", "actual")
	if err := w.Write(header); err != nil {
		panic(err)
	}

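	// The config iterator enumerates the cross product of every input's
	// possible values, so the blank CSV gets one row per combination.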
	it := opbench.NewConfigIterator(spec)
	c, ok := it.Next()
	for ok {
		var rec []string
		for _, t := range inputs {
			rec = append(rec, fmt.Sprintf("%d", int(c[t])))
		}
		// Add placeholder values for the estimated and actual.
		rec = append(rec, "0", "0")
		if err := w.Write(rec); err != nil {
			panic(err)
		}
		c, ok = it.Next()
	}
	w.Flush()

	return &out
}

// runBench iterates through a configuration CSV (possibly creating one if it
// doesn't exist) and verifies that the estimated costs for each query did not
// change. It can optionally run in a mode which rewrites the estimated cost,
// the actual runtime, or both.
func runBench(t *testing.T, spec *opbench.Spec, path string, mode runMode) {
	f, err := os.Open(path)
	var r *csv.Reader
	if err != nil {
		if !mode.rewriteEstimated || !mode.rewriteActual {
			t.Fatalf(
				"file %q does not exist; to create it, run with -%s and -%s",
				path,
				rewriteEstimatedFlag,
				rewriteActualFlag,
			)
		}
		r = csv.NewReader(getBlankCSV(spec))
	} else {
		r = csv.NewReader(f)
		defer f.Close()
	}

	headers, err := r.Read()
	if err != nil {
		t.Fatal(err)
	}

	var result bytes.Buffer
	w := csv.NewWriter(&result)
	if err := w.Write(headers); err != nil {
		t.Fatal(err)
	}

	params := make([]param, 0)
	estimatedIdx := -1
	actualIdx := -1
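	// Every header other than "estimated" and "actual" names one of the
	// plan's input parameters; remember which column each one occupies.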
	for i := range headers {
		switch headers[i] {
		case "estimated":
			estimatedIdx = i
		case "actual":
			actualIdx = i
		default:
			params = append(params, param{i, headers[i]})
		}
	}

	// TODO(justin): we should support multiple catalogs, and each query
	// should specify which catalog it pertains to.
	catalog := opbench.MakeTPCHCatalog()

	for {
		record, err := r.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			t.Fatal(err)
		}

		conf := opbench.Configuration{}
		newRecord := make([]string, len(record))
		copy(newRecord, record)
		for i := range params {
			val, err := strconv.ParseFloat(record[params[i].idx], 64)
			if err != nil {
				t.Fatal(err)
			}
			conf[params[i].name] = val
		}

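		// Instantiate the plan template with this row's parameter values.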
		planText := spec.FillInParams(conf)

		// Re-measure the actual runtime. We just pass through the old value
		// if not asked to regenerate it.
		if mode.rewriteActual {
			actual, err := measureQuery(planText)
			if err != nil {
				t.Fatal(err)
			}

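			// measureQuery reports nanoseconds; the CSV stores seconds.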
			seconds := float64(actual) / 1000000000

			newRecord[actualIdx] = fmt.Sprintf("%f", seconds)
		}

		// Compute the estimated cost.
		tester := opttester.New(catalog, planText)
		e, err := tester.Expr()
		if err != nil {
			t.Fatal(err)
		}

		cost := fmt.Sprintf("%f", e.(memo.RelExpr).Cost())

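		// In rewrite mode, record the new estimate; otherwise require that it
		// match the checked-in value.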
		if mode.rewriteEstimated {
			newRecord[estimatedIdx] = cost
		} else if newRecord[estimatedIdx] != cost {
			t.Errorf(
				"%s/%s:\n  expected: %s\n    actual: %s",
				path,
				conf,
				newRecord[estimatedIdx],
				cost,
			)
		}
		if err := w.Write(newRecord); err != nil {
			t.Fatal(err)
		}
	}
	w.Flush()

	if mode.rewriteEstimated || mode.rewriteActual {
		if err := ioutil.WriteFile(path, result.Bytes(), 0644); err != nil {
			t.Fatal(err)
		}
	}
}

// Benches is the set of benchmarks we run.
var Benches = []*opbench.Spec{
	HashJoinSpec,
	MergeJoinSpec,
	LookupJoinSpec,
	SortLineitemSpec,
	ScanOrdersSpec,
	ScanLineitemSpec,
}

// HashJoinSpec does a hash join between supplier and lineitem.
var HashJoinSpec = &opbench.Spec{
	Name: "tpch-hash-join",
	Plan: `
(Root
	(InnerJoin
		(Scan
			[
				(Table "supplier")
				(Cols "s_suppkey")
				(Index "supplier@s_nk")
				(HardLimit $supplier_rows)
			]
		)
		(Scan
			[
				(Table "lineitem")
				(Cols "l_suppkey")
				(Index "lineitem@l_sk")
				(HardLimit $lineitem_rows)
			]
		)
		[
			(Eq (Var "l_suppkey") (Var "s_suppkey"))
		]
		[ ]
	)
	(Presentation "l_suppkey")
	(NoOrdering)
)`,

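	// The HardLimit placeholders above bound how many rows each side of the
	// join reads; the Inputs below sweep those sizes.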
	Inputs: []opbench.Options{
		{Field: "lineitem_rows", Values: []float64{1000000, 2000000, 3000000, 4000000, 5000000, 6000000}},
		{Field: "supplier_rows", Values: []float64{1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000}},
	},

	GetParam: func(paramName string, config opbench.Configuration) string {
		switch paramName {
		case "lineitem_rows":
			return fmt.Sprintf("%d", int(config["lineitem_rows"]))
		case "supplier_rows":
			return fmt.Sprintf("%d", int(config["supplier_rows"]))
		}
		panic(fmt.Sprintf("can't handle %q", paramName))
	},
}

// MergeJoinSpec does a merge join between supplier and lineitem.
var MergeJoinSpec = &opbench.Spec{
	Name: "tpch-merge-join",
	Plan: `
(Root
	(MergeJoin
		(Scan
			[
				(Table "lineitem")
				(Cols "l_suppkey")
				(Index "lineitem@l_sk")
				(HardLimit $lineitem_rows)
			]
		)
		(Scan
			[
				(Table "supplier")
				(Cols "s_suppkey")
				(HardLimit $supplier_rows)
			]
		)
		[ ]
		[
			(JoinType "inner-join")
			(LeftEq "+l_suppkey")
			(RightEq "+s_suppkey")
			(LeftOrdering "+l_suppkey")
			(RightOrdering "+s_suppkey")
		]
	)
	(Presentation "l_suppkey")
	(NoOrdering)
)`,

	Inputs: []opbench.Options{
		{Field: "lineitem_rows", Values: []float64{1000000, 2000000, 3000000, 4000000, 5000000, 6000000}},
		{Field: "supplier_rows", Values: []float64{1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000}},
	},

	GetParam: func(paramName string, config opbench.Configuration) string {
		switch paramName {
		case "lineitem_rows":
			return fmt.Sprintf("%d", int(config["lineitem_rows"]))
		case "supplier_rows":
			return fmt.Sprintf("%d", int(config["supplier_rows"]))
		}
		panic(fmt.Sprintf("can't handle %q", paramName))
	},
}

// LookupJoinSpec does a lookup join between supplier and lineitem.
var LookupJoinSpec = &opbench.Spec{
	Name: "tpch-lookup-join",
	Plan: `
(Root
	(MakeLookupJoin
		(Scan
			[
				(Table "supplier")
				(Index "supplier@s_nk")
				(Cols "s_suppkey")
				(HardLimit $supplier_rows)
			]
		)
		[
			(JoinType "inner-join")
			(Table "lineitem")
			(Index "lineitem@l_sk")
			(KeyCols "s_suppkey")
			(Cols "l_suppkey")
			(LookupColsAreTableKey "true")
		]
		[
		]
	)
	(Presentation "l_suppkey")
	(NoOrdering)
)`,

	Inputs: []opbench.Options{
		{Field: "supplier_rows", Values: []float64{1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000}},
	},

	GetParam: func(paramName string, config opbench.Configuration) string {
		switch paramName {
		case "supplier_rows":
			return fmt.Sprintf("%d", int(config["supplier_rows"]))
		}
		panic(fmt.Sprintf("can't handle %q", paramName))
	},
}

// colPrefix returns a comma-separated list of the first n columns in allCols.
func colPrefix(allCols []string, n int) string {
	var result bytes.Buffer
	for i := 0; i < n; i++ {
		if i > 0 {
			result.WriteByte(',')
		}
		result.WriteString(allCols[i])
	}
	return result.String()
}

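// makeScanSpec builds a Spec that scans a prefix of the given table's columns
// under a varying hard row limit.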
func makeScanSpec(
	name string, tableName string, rowCounts []float64, colCounts []float64, colNames []string,
) *opbench.Spec {
	return &opbench.Spec{
		Name: name,
		Plan: fmt.Sprintf(`
(Root
	(Scan
		[
			(Table "%s")
			(Cols "$cols")
			(HardLimit $rows)
		]
	)
	(Presentation "$cols")
	(NoOrdering)
)`, tableName),

		Inputs: []opbench.Options{
			{Field: "rows", Values: rowCounts},
			{Field: "num_cols", Values: colCounts},
		},

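		// GetParam expands the plan template's placeholders: $rows becomes the
		// row limit and $cols becomes a prefix of colNames of length num_cols.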
		GetParam: func(paramName string, config opbench.Configuration) string {
			switch paramName {
			case "rows":
				return fmt.Sprintf("%d", int(config["rows"]))
			case "cols":
				return colPrefix(colNames, int(config["num_cols"]))
			}
			panic(fmt.Sprintf("can't handle %q", paramName))
		},
	}
}

// ScanLineitemSpec scans the lineitem table.
var ScanLineitemSpec = makeScanSpec(
	"scan-lineitem",
	"lineitem",
	[]float64{1000000, 2000000, 3000000, 4000000, 5000000, 6000000},
	[]float64{1, 2, 3, 4, 16},
	[]string{
		"l_orderkey", "l_partkey", "l_suppkey",
		"l_linenumber", "l_quantity", "l_extendedprice",
		"l_discount", "l_tax", "l_returnflag",
		"l_linestatus", "l_shipdate", "l_commitdate",
		"l_receiptdate", "l_shipinstruct", "l_shipmode",
		"l_comment",
	},
)

// ScanOrdersSpec scans the orders table.
var ScanOrdersSpec = makeScanSpec(
	"scan-orders",
	"orders",
	[]float64{250000, 500000, 750000, 1000000, 1250000, 1500000},
	[]float64{1, 3, 6, 9},
	[]string{
		"o_orderkey", "o_custkey", "o_orderstatus",
		"o_totalprice", "o_orderdate", "o_orderpriority",
		"o_clerk", "o_shippriority", "o_comment",
	},
)

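// makeSortSpec builds a Spec that scans a prefix of the given table's columns
// and sorts the result according to the given ordering.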
func makeSortSpec(
	name string,
	tableName string,
	rowCounts []float64,
	colCounts []float64,
	colNames []string,
	ordering string,
) *opbench.Spec {
	return &opbench.Spec{
		Name: name,
		Plan: fmt.Sprintf(`
(Root
	(Sort
		(Scan
			[
				(Table "%s")
				(Cols "$cols")
				(HardLimit $rows)
			]
		)
	)
	(Presentation "$cols")
	(OrderingChoice "%s")
)`, tableName, ordering),

		Inputs: []opbench.Options{
			{Field: "rows", Values: rowCounts},
			{Field: "num_cols", Values: colCounts},
		},

		GetParam: func(paramName string, config opbench.Configuration) string {
			switch paramName {
			case "rows":
				return fmt.Sprintf("%d", int(config["rows"]))
			case "cols":
				return colPrefix(colNames, int(config["num_cols"]))
			}
			panic(fmt.Sprintf("can't handle %q", paramName))
		},
	}
}

// SortLineitemSpec scans and sorts the lineitem table.
var SortLineitemSpec = makeSortSpec(
	"sort-lineitem",
	"lineitem",
	[]float64{1000000, 2000000, 3000000, 4000000, 5000000, 6000000},
	[]float64{1, 2, 3},
	[]string{
		"l_orderkey", "l_partkey", "l_suppkey",
		"l_linenumber", "l_quantity", "l_extendedprice",
		"l_discount", "l_tax", "l_returnflag",
		"l_linestatus", "l_shipdate", "l_commitdate",
		"l_receiptdate", "l_shipinstruct", "l_shipmode",
		"l_comment",
	},
	"+l_orderkey",
)