// Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

package sql

import (
	"context"
	gosql "database/sql"
	"fmt"
	"reflect"
	"testing"

	"github.com/cockroachdb/cockroach/pkg/base"
	"github.com/cockroachdb/cockroach/pkg/testutils"
	"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
	"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
	"github.com/cockroachdb/cockroach/pkg/util/leaktest"
	"github.com/cockroachdb/cockroach/pkg/util/randutil"
	"github.com/cockroachdb/errors"
	"github.com/stretchr/testify/assert"
	"golang.org/x/sync/errgroup"
)

// queryCacheTestHelper bundles a test server together with a set of SQL
// connections (and per-connection runners) used to exercise the query plan
// cache from multiple sessions.
type queryCacheTestHelper struct {
	srv  serverutils.TestServerInterface
	godb *gosql.DB

	// conns[i] and runners[i] refer to the same underlying connection.
	conns   []*gosql.Conn
	runners []*sqlutils.SQLRunner

	// hitsDelta/missesDelta are the counter values at the last ResetStats
	// call; GetStats subtracts them so stats read as if reset to zero.
	hitsDelta, missesDelta int
}

// makeQueryCacheTestHelper starts a test server, opens numConns connections,
// creates db1 with a populated table db1.t, points every connection at db1,
// enables the query cache, and resets the hit/miss counters.
func makeQueryCacheTestHelper(tb testing.TB, numConns int) *queryCacheTestHelper {
	h := &queryCacheTestHelper{}
	h.srv, h.godb, _ = serverutils.StartServer(tb, base.TestServerArgs{})

	h.conns = make([]*gosql.Conn, numConns)
	h.runners = make([]*sqlutils.SQLRunner, numConns)
	for i := range h.conns {
		var err error
		h.conns[i], err = h.godb.Conn(context.Background())
		if err != nil {
			tb.Fatal(err)
		}
		h.runners[i] = sqlutils.MakeSQLRunner(h.conns[i])
	}
	r0 := h.runners[0]
	r0.Exec(tb, "DROP DATABASE IF EXISTS db1")
	r0.Exec(tb, "DROP DATABASE IF EXISTS db2")
	r0.Exec(tb, "CREATE DATABASE db1")
	r0.Exec(tb, "CREATE TABLE db1.t (a INT, b INT)")
	r0.Exec(tb, "INSERT INTO db1.t VALUES (1, 1)")
	for _, r := range h.runners {
		r.Exec(tb, "SET DATABASE = db1")
	}
	r0.Exec(tb, "SET CLUSTER SETTING sql.query_cache.enabled = true")
	h.ResetStats()
	return h
}

// Stop shuts down the test server.
func (h *queryCacheTestHelper) Stop() {
	h.srv.Stopper().Stop(context.Background())
}

// GetStats returns the plan cache hit and miss counters, relative to the last
// ResetStats call.
func (h *queryCacheTestHelper) GetStats() (numHits, numMisses int) {
	return int(h.srv.MustGetSQLCounter(MetaSQLOptPlanCacheHits.Name)) - h.hitsDelta,
		int(h.srv.MustGetSQLCounter(MetaSQLOptPlanCacheMisses.Name)) - h.missesDelta
}

// ResetStats makes subsequent GetStats calls report hits/misses relative to
// this point in time.
func (h *queryCacheTestHelper) ResetStats() {
	hits, misses := h.GetStats()
	h.hitsDelta += hits
	h.missesDelta += misses
}

// AssertStats checks that the (reset-relative) hit and miss counters match
// the expected values.
func (h *queryCacheTestHelper) AssertStats(tb *testing.T, expHits, expMisses int) {
	tb.Helper()
	hits, misses := h.GetStats()
	assert.Equal(tb, expHits, hits, "hits")
	assert.Equal(tb, expMisses, misses, "misses")
}

// TestQueryCache exercises the query plan cache: hit/miss accounting across
// multiple connections, prepared statements (with and without arguments and
// type hints), concurrent use, per-database cache keying, and invalidation on
// schema changes and new table statistics.
func TestQueryCache(t *testing.T) {
	defer leaktest.AfterTest(t)()

	// Grouping the parallel subtests into a non-parallel subtest allows the defer
	// call above to work as expected.
	t.Run("group", func(t *testing.T) {
		t.Run("simple", func(t *testing.T) {
			t.Parallel() // SAFE FOR TESTING
			const numConns = 4
			h := makeQueryCacheTestHelper(t, numConns)
			defer h.Stop()

			// Alternate between the connections.
			for i := 0; i < 5; i++ {
				for _, r := range h.runners {
					r.CheckQueryResults(t, "SELECT * FROM t", [][]string{{"1", "1"}})
				}
			}
			// We should have 1 miss and the rest hits.
			h.AssertStats(t, 5*numConns-1, 1)
		})

		t.Run("simple-prepare", func(t *testing.T) {
			t.Parallel() // SAFE FOR TESTING
			const numConns = 4
			h := makeQueryCacheTestHelper(t, numConns)
			defer h.Stop()

			// Alternate between the connections.
			for i := 0; i < 5; i++ {
				for _, r := range h.runners {
					r.Exec(t, fmt.Sprintf("PREPARE a%d AS SELECT * FROM t", i))
				}
			}
			// We should have 1 miss and the rest hits.
			h.AssertStats(t, 5*numConns-1, 1)

			for i := 0; i < 5; i++ {
				for _, r := range h.runners {
					r.CheckQueryResults(
						t,
						fmt.Sprintf("EXECUTE a%d", i),
						[][]string{{"1", "1"}},
					)
				}
			}
		})

		t.Run("simple-prepare-with-args", func(t *testing.T) {
			t.Parallel() // SAFE FOR TESTING
			const numConns = 4
			h := makeQueryCacheTestHelper(t, numConns)
			defer h.Stop()

			// Alternate between the connections.
			for i := 0; i < 5; i++ {
				for _, r := range h.runners {
					r.Exec(t, fmt.Sprintf("PREPARE a%d AS SELECT a + $1, b + $2 FROM t", i))
				}
			}
			// We should have 1 miss and the rest hits.
			h.AssertStats(t, 5*numConns-1, 1)

			for i := 0; i < 5; i++ {
				for _, r := range h.runners {
					r.CheckQueryResults(
						t,
						fmt.Sprintf("EXECUTE a%d (10, 100)", i),
						[][]string{{"11", "101"}},
					)
					r.CheckQueryResults(
						t,
						fmt.Sprintf("EXECUTE a%d (20, 200)", i),
						[][]string{{"21", "201"}},
					)
				}
			}
		})

		t.Run("parallel", func(t *testing.T) {
			t.Parallel() // SAFE FOR TESTING
			const numConns = 4
			h := makeQueryCacheTestHelper(t, numConns)
			defer h.Stop()

			var group errgroup.Group
			for connIdx := range h.conns {
				// Capture the connection for the closure below.
				c := h.conns[connIdx]
				group.Go(func() error {
					for j := 0; j < 10; j++ {
						rows, err := c.QueryContext(context.Background(), "SELECT * FROM t")
						if err != nil {
							return err
						}
						res, err := sqlutils.RowsToStrMatrix(rows)
						if err != nil {
							return err
						}
						if !reflect.DeepEqual(res, [][]string{{"1", "1"}}) {
							return errors.Errorf("incorrect results %v", res)
						}
					}
					return nil
				})
			}
			if err := group.Wait(); err != nil {
				t.Fatal(err)
			}
		})

		t.Run("parallel-prepare", func(t *testing.T) {
			t.Parallel() // SAFE FOR TESTING
			const numConns = 4
			h := makeQueryCacheTestHelper(t, numConns)
			defer h.Stop()

			var group errgroup.Group
			for connIdx := range h.conns {
				// Capture the connection for the closure below.
				c := h.conns[connIdx]
				group.Go(func() error {
					ctx := context.Background()
					for j := 0; j < 10; j++ {
						// Query with a multi-use CTE (as a regression test for #44867). The
						// left join condition never passes so this is really equivalent to:
						//   SELECT a+$1,b+$2 FROM t
						query := fmt.Sprintf(`PREPARE a%d AS
WITH cte(x,y) AS (SELECT a+$1, b+$2 FROM t)
SELECT cte.x, cte.y FROM cte LEFT JOIN cte as cte2 on cte.y = cte2.x`, j)

						if _, err := c.ExecContext(ctx, query); err != nil {
							return err
						}
						rows, err := c.QueryContext(ctx, fmt.Sprintf("EXECUTE a%d (10, 100)", j))
						if err != nil {
							return err
						}
						res, err := sqlutils.RowsToStrMatrix(rows)
						if err != nil {
							return err
						}
						if !reflect.DeepEqual(res, [][]string{{"11", "101"}}) {
							return errors.Errorf("incorrect results %v", res)
						}
					}
					return nil
				})
			}
			if err := group.Wait(); err != nil {
				t.Fatal(err)
			}
		})

		// Test connections running the same statement but under different databases.
		t.Run("multidb", func(t *testing.T) {
			t.Parallel() // SAFE FOR TESTING
			const numConns = 4
			h := makeQueryCacheTestHelper(t, numConns)
			defer h.Stop()

			r0 := h.runners[0]
			r0.Exec(t, "CREATE DATABASE db2")
			r0.Exec(t, "CREATE TABLE db2.t (a INT)")
			r0.Exec(t, "INSERT INTO db2.t VALUES (2)")
			// Odd-numbered connections use db2 (whose t has a different schema
			// and contents), even ones stay on db1.
			for i := range h.runners {
				if i%2 == 1 {
					h.runners[i].Exec(t, "SET DATABASE = db2")
				}
			}
			// Alternate between the connections.
			for i := 0; i < 5; i++ {
				for j, r := range h.runners {
					var res [][]string
					if j%2 == 0 {
						res = [][]string{{"1", "1"}}
					} else {
						res = [][]string{{"2"}}
					}
					r.CheckQueryResults(t, "SELECT * FROM t", res)
				}
			}
		})

		t.Run("multidb-prepare", func(t *testing.T) {
			t.Parallel() // SAFE FOR TESTING
			const numConns = 4
			h := makeQueryCacheTestHelper(t, numConns)
			defer h.Stop()

			r0 := h.runners[0]
			r0.Exec(t, "CREATE DATABASE db2")
			r0.Exec(t, "CREATE TABLE db2.t (a INT)")
			r0.Exec(t, "INSERT INTO db2.t VALUES (2)")
			// Odd-numbered connections use db2, even ones stay on db1.
			for i := range h.runners {
				if i%2 == 1 {
					h.runners[i].Exec(t, "SET DATABASE = db2")
				}
			}
			// Alternate between the connections.
			for i := 0; i < 5; i++ {
				for j, r := range h.runners {
					r.Exec(t, fmt.Sprintf("PREPARE a%d AS SELECT a + $1 FROM t", i))
					var res [][]string
					if j%2 == 0 {
						res = [][]string{{"11"}}
					} else {
						res = [][]string{{"12"}}
					}
					r.CheckQueryResults(t, fmt.Sprintf("EXECUTE a%d (10)", i), res)
				}
			}
		})

		// Test that a schema change triggers cache invalidation.
		t.Run("schemachange", func(t *testing.T) {
			t.Parallel() // SAFE FOR TESTING
			h := makeQueryCacheTestHelper(t, 2 /* numConns */)
			defer h.Stop()
			r0, r1 := h.runners[0], h.runners[1]
			r0.CheckQueryResults(t, "SELECT * FROM t", [][]string{{"1", "1"}})
			h.AssertStats(t, 0 /* hits */, 1 /* misses */)
			r1.CheckQueryResults(t, "SELECT * FROM t", [][]string{{"1", "1"}})
			h.AssertStats(t, 1 /* hits */, 1 /* misses */)
			r0.Exec(t, "ALTER TABLE t ADD COLUMN c INT AS (a+b) STORED")
			h.AssertStats(t, 1 /* hits */, 1 /* misses */)
			// The schema change invalidated the cached plan: expect a new miss.
			r1.CheckQueryResults(t, "SELECT * FROM t", [][]string{{"1", "1", "2"}})
			h.AssertStats(t, 1 /* hits */, 2 /* misses */)
		})

		// Test that creating new statistics triggers cache invalidation.
		t.Run("statschange", func(t *testing.T) {
			t.Parallel() // SAFE FOR TESTING
			h := makeQueryCacheTestHelper(t, 2 /* numConns */)
			defer h.Stop()
			r0, r1 := h.runners[0], h.runners[1]
			r0.CheckQueryResults(t, "SELECT * FROM t", [][]string{{"1", "1"}})
			h.AssertStats(t, 0 /* hits */, 1 /* misses */)
			r1.CheckQueryResults(t, "SELECT * FROM t", [][]string{{"1", "1"}})
			h.AssertStats(t, 1 /* hits */, 1 /* misses */)
			r0.Exec(t, "CREATE STATISTICS s FROM t")
			h.AssertStats(t, 1 /* hits */, 1 /* misses */)
			// The new statistics invalidated the cached plan: expect a new miss.
			r1.CheckQueryResults(t, "SELECT * FROM t", [][]string{{"1", "1"}})
			h.AssertStats(t, 1 /* hits */, 2 /* misses */)
		})

		// Test that a schema change triggers cache invalidation.
		t.Run("schemachange-prepare", func(t *testing.T) {
			t.Parallel() // SAFE FOR TESTING
			h := makeQueryCacheTestHelper(t, 2 /* numConns */)
			defer h.Stop()
			r0, r1 := h.runners[0], h.runners[1]
			r0.Exec(t, "PREPARE a AS SELECT * FROM t")
			r0.CheckQueryResults(t, "EXECUTE a", [][]string{{"1", "1"}})
			r0.CheckQueryResults(t, "EXECUTE a", [][]string{{"1", "1"}})
			r0.Exec(t, "ALTER TABLE t ADD COLUMN c INT AS (a+b) STORED")
			// A statement prepared after the schema change sees the new column.
			r1.Exec(t, "PREPARE b AS SELECT * FROM t")
			r1.CheckQueryResults(t, "EXECUTE b", [][]string{{"1", "1", "2"}})
		})

		// Test a schema change where the other connections are running the query in
		// parallel.
		t.Run("schemachange-parallel", func(t *testing.T) {
			t.Parallel() // SAFE FOR TESTING
			const numConns = 4

			h := makeQueryCacheTestHelper(t, numConns)
			defer h.Stop()
			var group errgroup.Group
			// Connection 0 is reserved for running the schema change below.
			for connIdx := 1; connIdx < numConns; connIdx++ {
				c := h.conns[connIdx]
				connIdx := connIdx
				group.Go(func() error {
					sawChanged := false
					prepIdx := 0
					doQuery := func() error {
						// Some threads do prepare, others execute directly.
						var rows *gosql.Rows
						var err error
						ctx := context.Background()
						if connIdx%2 == 1 {
							rows, err = c.QueryContext(ctx, "SELECT * FROM t")
						} else {
							prepIdx++
							_, err = c.ExecContext(ctx, fmt.Sprintf("PREPARE a%d AS SELECT * FROM t", prepIdx))
							if err == nil {
								rows, err = c.QueryContext(ctx, fmt.Sprintf("EXECUTE a%d", prepIdx))
								if err != nil {
									// If the schema change happens in-between the PREPARE and
									// EXECUTE, we will get an error. Tolerate this error if we
									// haven't seen updated results already.
									if !sawChanged && testutils.IsError(err, "cached plan must not change result type") {
										t.Logf("thread %d hit race", connIdx)
										return nil
									}
								}
							}
						}
						if err != nil {
							return err
						}
						res, err := sqlutils.RowsToStrMatrix(rows)
						if err != nil {
							return err
						}
						if reflect.DeepEqual(res, [][]string{{"1", "1"}}) {
							// Old results must never be observed after new results.
							if sawChanged {
								return errors.Errorf("Saw updated results, then older results")
							}
						} else if reflect.DeepEqual(res, [][]string{{"1", "1", "2"}}) {
							sawChanged = true
						} else {
							return errors.Errorf("incorrect results %v", res)
						}
						return nil
					}

					// Run the query until we see an updated result.
					for !sawChanged {
						if err := doQuery(); err != nil {
							return err
						}
					}
					t.Logf("thread %d saw changed results", connIdx)

					// Now run the query a bunch more times to make sure we keep reading the
					// updated version.
					for i := 0; i < 10; i++ {
						if err := doQuery(); err != nil {
							return err
						}
					}
					return nil
				})
			}
			r0 := h.runners[0]
			r0.Exec(t, "ALTER TABLE t ADD COLUMN c INT AS (a+b) STORED")
			if err := group.Wait(); err != nil {
				t.Fatal(err)
			}
		})

		// Verify the case where a PREPARE encounters a query cache entry that was
		// created by a direct execution (and hence has no PrepareMetadata).
		t.Run("exec-and-prepare", func(t *testing.T) {
			t.Parallel() // SAFE FOR TESTING
			h := makeQueryCacheTestHelper(t, 1 /* numConns */)
			defer h.Stop()

			r0 := h.runners[0]
			r0.Exec(t, "SELECT * FROM t") // Should miss the cache.
			h.AssertStats(t, 0 /* hits */, 1 /* misses */)

			r0.Exec(t, "SELECT * FROM t") // Should hit the cache.
			h.AssertStats(t, 1 /* hits */, 1 /* misses */)

			r0.Exec(t, "PREPARE x AS SELECT * FROM t") // Should miss the cache.
			h.AssertStats(t, 1 /* hits */, 2 /* misses */)

			r0.Exec(t, "PREPARE y AS SELECT * FROM t") // Should hit the cache.
			h.AssertStats(t, 2 /* hits */, 2 /* misses */)

			r0.CheckQueryResults(t, "EXECUTE x", [][]string{{"1", "1"}})
			r0.CheckQueryResults(t, "EXECUTE y", [][]string{{"1", "1"}})
		})

		// Verify the case where we PREPARE the same statement with different hints.
		t.Run("prepare-hints", func(t *testing.T) {
			t.Parallel() // SAFE FOR TESTING
			h := makeQueryCacheTestHelper(t, 1 /* numConns */)
			defer h.Stop()

			r0 := h.runners[0]
			r0.Exec(t, "PREPARE a1 AS SELECT pg_typeof(1 + $1)") // Should miss the cache.
			h.AssertStats(t, 0 /* hits */, 1 /* misses */)

			r0.Exec(t, "PREPARE a2 AS SELECT pg_typeof(1 + $1)") // Should hit the cache.
			h.AssertStats(t, 1 /* hits */, 1 /* misses */)

			r0.Exec(t, "PREPARE b1 (float) AS SELECT pg_typeof(1 + $1)") // Should miss the cache.
			h.AssertStats(t, 1 /* hits */, 2 /* misses */)

			r0.Exec(t, "PREPARE b2 (float) AS SELECT pg_typeof(1 + $1)") // Should hit the cache.
			h.AssertStats(t, 2 /* hits */, 2 /* misses */)

			r0.Exec(t, "PREPARE c1 (decimal) AS SELECT pg_typeof(1 + $1)") // Should miss the cache.
			h.AssertStats(t, 2 /* hits */, 3 /* misses */)

			r0.Exec(t, "PREPARE c2 (decimal) AS SELECT pg_typeof(1 + $1)") // Should hit the cache.
			h.AssertStats(t, 3 /* hits */, 3 /* misses */)

			// Re-preparing with each hint set hits only the entry for the most
			// recently cached hint variant; these each miss again.
			r0.Exec(t, "PREPARE a3 AS SELECT pg_typeof(1 + $1)") // Should miss the cache.
			h.AssertStats(t, 3 /* hits */, 4 /* misses */)

			r0.Exec(t, "PREPARE b3 (float) AS SELECT pg_typeof(1 + $1)") // Should miss the cache.
			h.AssertStats(t, 3 /* hits */, 5 /* misses */)

			r0.Exec(t, "PREPARE c3 (decimal) AS SELECT pg_typeof(1 + $1)") // Should miss the cache.
			h.AssertStats(t, 3 /* hits */, 6 /* misses */)

			r0.CheckQueryResults(t, "EXECUTE a1 (1)", [][]string{{"bigint"}})
			r0.CheckQueryResults(t, "EXECUTE a2 (1)", [][]string{{"bigint"}})
			r0.CheckQueryResults(t, "EXECUTE a3 (1)", [][]string{{"bigint"}})

			r0.CheckQueryResults(t, "EXECUTE b1 (1)", [][]string{{"double precision"}})
			r0.CheckQueryResults(t, "EXECUTE b2 (1)", [][]string{{"double precision"}})
			r0.CheckQueryResults(t, "EXECUTE b3 (1)", [][]string{{"double precision"}})

			r0.CheckQueryResults(t, "EXECUTE c1 (1)", [][]string{{"numeric"}})
			r0.CheckQueryResults(t, "EXECUTE c2 (1)", [][]string{{"numeric"}})
			r0.CheckQueryResults(t, "EXECUTE c3 (1)", [][]string{{"numeric"}})
		})
	})
}

// BenchmarkQueryCache is a set of benchmarks that run queries against a server
// with the query cache on and off, with varying number of parallel clients and
// with workloads that are either cacheable or not.
//
// For microbenchmarks of the query cache data structures, see the sql/querycache
// package.
510 func BenchmarkQueryCache(b *testing.B) { 511 defer leaktest.AfterTest(b)() 512 513 workloads := []string{"small", "large"} 514 methods := []string{"simple", "prepare-once", "prepare-each"} 515 516 run := func( 517 b *testing.B, 518 numClients int, 519 workloadIdx int, 520 methodIdx int, 521 cacheOn bool, 522 ) { 523 h := makeQueryCacheTestHelper(b, numClients) 524 defer h.Stop() 525 r0 := h.runners[0] 526 r0.Exec(b, "CREATE TABLE kv (k INT PRIMARY KEY, v INT)") 527 528 r0.Exec(b, fmt.Sprintf("SET CLUSTER SETTING sql.query_cache.enabled = %t", cacheOn)) 529 var group errgroup.Group 530 b.ResetTimer() 531 for connIdx := 0; connIdx < numClients; connIdx++ { 532 c := h.conns[connIdx] 533 group.Go(func() error { 534 rng, _ := randutil.NewPseudoRand() 535 ctx := context.Background() 536 // We use a small or large range of values depending on the 537 // workload type. 538 valRange := 0 539 switch workloadIdx { 540 case 0: // small 541 valRange = 100 542 case 1: // large 543 valRange = 10000000 544 } 545 var stmt *gosql.Stmt 546 if methodIdx == 1 { 547 var err error 548 stmt, err = c.PrepareContext(ctx, "SELECT v FROM kv WHERE k=$1") 549 if err != nil { 550 return err 551 } 552 } 553 554 for i := 0; i < b.N/numClients; i++ { 555 val := rng.Intn(valRange) 556 var err error 557 switch methodIdx { 558 case 0: // simple 559 query := fmt.Sprintf("SELECT v FROM kv WHERE k=%d", val) 560 _, err = c.ExecContext(ctx, query) 561 562 case 1: // prepare-once 563 _, err = stmt.ExecContext(ctx, val) 564 565 case 2: // prepare-every-time 566 _, err = c.ExecContext(ctx, "SELECT v FROM kv WHERE k=$1", val) 567 } 568 if err != nil { 569 return err 570 } 571 } 572 return nil 573 }) 574 if err := group.Wait(); err != nil { 575 b.Fatal(err) 576 } 577 } 578 } 579 580 for workload, workloadName := range workloads { 581 b.Run(workloadName, func(b *testing.B) { 582 for _, clients := range []int{1, 4, 8} { 583 b.Run(fmt.Sprintf("clients-%d", clients), func(b *testing.B) { 584 for method, 
methodName := range methods { 585 b.Run(methodName, func(b *testing.B) { 586 for _, cache := range []bool{false, true} { 587 name := "cache-off" 588 if cache { 589 name = "cache-on" 590 } 591 b.Run(name, func(b *testing.B) { 592 run(b, clients, workload, method, cache) 593 }) 594 } 595 }) 596 } 597 }) 598 } 599 }) 600 } 601 }