github.com/cockroachdb/cockroach@v20.2.0-alpha.1+incompatible/pkg/sql/logictest/logic.go (about) 1 // Copyright 2015 The Cockroach Authors. 2 // 3 // Use of this software is governed by the Business Source License 4 // included in the file licenses/BSL.txt. 5 // 6 // As of the Change Date specified in that file, in accordance with 7 // the Business Source License, use of this software will be governed 8 // by the Apache License, Version 2.0, included in the file 9 // licenses/APL.txt. 10 11 package logictest 12 13 import ( 14 "bufio" 15 "bytes" 16 "context" 17 "crypto/md5" 18 gosql "database/sql" 19 "flag" 20 "fmt" 21 "io" 22 "math/rand" 23 "net/url" 24 "os" 25 "path/filepath" 26 "reflect" 27 "regexp" 28 "runtime/debug" 29 "runtime/trace" 30 "sort" 31 "strconv" 32 "strings" 33 "testing" 34 "text/tabwriter" 35 "time" 36 "unicode/utf8" 37 38 "github.com/cockroachdb/cockroach/pkg/base" 39 "github.com/cockroachdb/cockroach/pkg/col/coldata" 40 "github.com/cockroachdb/cockroach/pkg/kv/kvserver" 41 "github.com/cockroachdb/cockroach/pkg/roachpb" 42 "github.com/cockroachdb/cockroach/pkg/security" 43 "github.com/cockroachdb/cockroach/pkg/server" 44 "github.com/cockroachdb/cockroach/pkg/settings/cluster" 45 "github.com/cockroachdb/cockroach/pkg/sql/execinfra" 46 "github.com/cockroachdb/cockroach/pkg/sql/mutations" 47 "github.com/cockroachdb/cockroach/pkg/sql/parser" 48 "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" 49 "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" 50 "github.com/cockroachdb/cockroach/pkg/sql/row" 51 "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" 52 "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" 53 "github.com/cockroachdb/cockroach/pkg/sql/stats" 54 "github.com/cockroachdb/cockroach/pkg/testutils" 55 "github.com/cockroachdb/cockroach/pkg/testutils/physicalplanutils" 56 "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" 57 "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" 58 "github.com/cockroachdb/cockroach/pkg/util" 59 "github.com/cockroachdb/cockroach/pkg/util/envutil" 60 "github.com/cockroachdb/cockroach/pkg/util/log" 61 "github.com/cockroachdb/cockroach/pkg/util/randutil" 62 "github.com/cockroachdb/cockroach/pkg/util/syncutil" 63 "github.com/cockroachdb/cockroach/pkg/util/timeutil" 64 "github.com/cockroachdb/errors" 65 "github.com/lib/pq" 66 ) 67 68 // This file is home to TestLogic, a general-purpose engine for 69 // running SQL logic tests. 70 // 71 // TestLogic implements the infrastructure that runs end-to-end tests 72 // against CockroachDB's SQL layer. It is typically used to run 73 // CockroachDB's own tests (stored in the `testdata` directory) during 74 // development and CI, and a subset of SQLite's "Sqllogictest" during 75 // nightly CI runs. However, any test input can be specified via 76 // command-line flags (see below). 77 // 78 // In a nutshell, TestLogic reads one or more test input files 79 // containing sequences of SQL statements and queries. Each input file 80 // is meant to test a feature group. The reason why tests can/should 81 // be split across multiple files is that each test input file gets 82 // its own fresh, empty database. 83 // 84 // Input files for unit testing are stored alongside the source code 85 // in the `testdata` subdirectory. The input files for the larger 86 // TestSqlLiteLogic tests are stored in a separate repository. 87 // 88 // The test input is expressed using a domain-specific language, called 89 // Test-Script, defined by SQLite's "Sqllogictest". 
The official home 90 // of Sqllogictest and Test-Script is 91 // https://www.sqlite.org/sqllogictest/ 92 // 93 // (the TestSqlLiteLogic test uses a fork of the Sqllogictest test files; 94 // its input files are hosted at https://github.com/cockroachdb/sqllogictest) 95 // 96 // Test-Script is line-oriented. It supports both statements which 97 // generate no result rows, and queries that produce result rows. The 98 // result of queries can be checked either using an explicit reference 99 // output in the test file, or using the expected row count and a hash 100 // of the expected output. A test can also check for expected column 101 // names for query results, or expected errors. 102 // 103 // Logic tests can start with a directive as follows: 104 // 105 // # LogicTest: local fakedist 106 // 107 // This directive lists configurations; the test is run once in each 108 // configuration (in separate subtests). The configurations are defined by 109 // logicTestConfigs. If the directive is missing, the test is run in the 110 // default configuration. 111 // 112 // The directive also supports blacklists, i.e. running all specified 113 // configurations apart from a blacklisted configuration: 114 // 115 // # LogicTest: default-configs !3node-tenant 116 // 117 // If a blacklist is specified without an accompanying configuration, the 118 // default config is assumed. i.e., the following directive is equivalent to the 119 // one above: 120 // 121 // # LogicTest: !3node-tenant 122 // 123 // The Test-Script language is extended here for use with CockroachDB. The 124 // supported directives are: 125 // 126 // - statement ok 127 // Runs the statement that follows and expects success. For 128 // example: 129 // statement ok 130 // CREATE TABLE kv (k INT PRIMARY KEY, v INT) 131 // 132 // 133 // - statement count N 134 // Like "statement ok" but expect a final RowsAffected count of N. 135 // example: 136 // statement count 2 137 // INSERT INTO kv VALUES (1,2), (2,3) 138 // 139 // - statement error <regexp> 140 // Runs the statement that follows and expects an 141 // error that matches the given regexp. 142 // 143 // - query <typestring> <options> <label> 144 // Runs the query that follows and verifies the results (specified after the 145 // query and a ---- separator). Example: 146 // query I 147 // SELECT 1, 2 148 // ---- 149 // 1 2 150 // 151 // The type string specifies the number of columns and their types: 152 // - T for text; also used for various types which get converted 153 // to string (arrays, timestamps, etc.). 154 // - I for integer 155 // - R for floating point or decimal 156 // - B for boolean 157 // - O for oid 158 // 159 // Options are comma separated strings from the following: 160 // - nosort (default) 161 // - rowsort: sorts both the returned and the expected rows assuming one 162 // white-space separated word per column. 163 // - valuesort: sorts all values on all rows as one big set of 164 // strings (for both the returned and the expected rows). 165 // - partialsort(x,y,..): performs a partial sort on both the 166 // returned and the expected results, preserving the relative 167 // order of rows that differ on the specified columns 168 // (1-indexed); for results that are expected to be already 169 // ordered according to these columns. See partialSort() for 170 // more information. 171 // - colnames: column names are verified (the expected column names 172 // are the first line in the expected results). 
173 // - retry: if the expected results do not match the actual results, the 174 // test will be retried with exponential backoff up to some maximum 175 // duration. If the test succeeds at any time during that period, it 176 // is considered successful. Otherwise, it is a failure. See 177 // testutils.SucceedsSoon for more information. If run with the 178 // -rewrite flag, inserts a 500ms sleep before executing the query 179 // once. 180 // - kvtrace: runs the query and compares against the results of the 181 // kv operations trace of the query. kvtrace optionally accepts 182 // arguments of the form kvtrace(op,op,...). Op is one of 183 // the accepted k/v arguments such as 'CPut', 'Scan' etc. It 184 // also accepts arguments of the form 'prefix=...'. For example, 185 // if kvtrace(CPut,Del,prefix=/Table/54,prefix=/Table/55), the 186 // results will be filtered to contain messages starting with 187 // CPut /Table/54, CPut /Table/55, Del /Table/54, Del /Table/55. 188 // Cannot be combined with noticetrace. 189 // - noticetrace: runs the query and compares only the notices that 190 // appear. Cannot be combined with kvtrace. 191 // 192 // The label is optional. If specified, the test runner stores a hash 193 // of the results of the query under the given label. If the label is 194 // reused, the test runner verifies that the results are the 195 // same. This can be used to verify that two or more queries in the 196 // same test script that are logically equivalent always generate the 197 // same output. If a label is provided, expected results don't need to 198 // be provided (in which case there should be no ---- separator). 199 // 200 // - query error <regexp> 201 // Runs the query that follows and expects an error 202 // that matches the given regexp. 203 // 204 // - repeat <number> 205 // It causes the following `statement` or `query` to be repeated the given 206 // number of times. For example: 207 // repeat 50 208 // statement ok 209 // INSERT INTO T VALUES ((SELECT MAX(k+1) FROM T)) 210 // 211 // - let $varname 212 // Executes the query that follows (expecting a single result) and remembers 213 // the result (as a string) for later use. Any `$varname` occurrences in 214 // subsequent statements or queries are replaced with the result. The 215 // variable name must start with a letter, and subsequent characters must be 216 // letters, digits, or underscores. Example: 217 // let $foo 218 // SELECT MAX(v) FROM kv 219 // 220 // statement ok 221 // SELECT * FROM kv WHERE v = $foo 222 // 223 // - sleep <duration> 224 // Introduces a sleep period. Example: sleep 2s 225 // 226 // - user <username> 227 // Changes the user for subsequent statements or queries. 228 // 229 // - skipif <mysql/mssql/postgresql/cockroachdb> 230 // Skips the following `statement` or `query` if the argument is postgresql 231 // or cockroachdb. 232 // 233 // - onlyif <mysql/mssql/postgresql/cockroachdb> 234 // Skips the following `statement` or query if the argument is not postgresql 235 // or cockroachdb. 236 // 237 // - traceon <file> 238 // Enables tracing to the given file. 239 // 240 // - traceoff 241 // Stops tracing. 242 // 243 // - kv-batch-size <num> 244 // Limits the kvfetcher batch size; it can be used to trigger certain error 245 // conditions or corner cases around limited batches. 246 // 247 // - subtest <testname> 248 // Defines the start of a subtest. The subtest is any number of statements 249 // that occur after this command until the end of file or the next subtest 250 // command. 
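//
// As an illustrative sketch only (not drawn from an actual testdata file,
// and using hypothetical table/column names), a small test file combining
// several of the directives above could look like:
//
//   # LogicTest: local fakedist
//
//   statement ok
//   CREATE TABLE kv (k INT PRIMARY KEY, v INT)
//
//   statement count 2
//   INSERT INTO kv VALUES (1, 10), (2, 20)
//
//   query II colnames,rowsort
//   SELECT k, v FROM kv
//   ----
//   k  v
//   1  10
//   2  20
//
//   let $maxv
//   SELECT max(v) FROM kv
//
//   statement error duplicate key
//   INSERT INTO kv VALUES (1, $maxv)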
251 // 252 // The overall architecture of TestLogic is as follows: 253 // 254 // - TestLogic() selects the input files and instantiates 255 // a `logicTest` object for each input file. 256 // 257 // - logicTest.run() sets up a new database. 258 // - logicTest.processTestFile() runs all tests from that input file. 259 // 260 // - each individual test in an input file is instantiated either as a 261 // logicStatement or logicQuery object. These are then processed by 262 // either logicTest.execStatement() or logicTest.execQuery(). 263 // 264 // TestLogic has three main parameter groups: 265 // 266 // - Which input files are processed. 267 // - How and when to stop when tests fail. 268 // - Which results are reported. 269 // 270 // The parameters are typically set using the TESTFLAGS `make` 271 // parameter, as in: 272 // 273 // make test PKG=./pkg/sql TESTS=TestLogic TESTFLAGS='....' 274 // 275 // Input file selection: 276 // 277 // -d <glob> selects all files matching <glob>. This can mix and 278 // match wildcards (*/?) or groups like {a,b,c}. 279 // 280 // -bigtest enables the long-running SqlLiteLogic test, which uses files from 281 // CockroachDB's fork of Sqllogictest. 282 // 283 // Configuration: 284 // 285 // -config name[,name2,...] customizes the test cluster configuration for test 286 // files that lack LogicTest directives; must be one 287 // of `logicTestConfigs`. 288 // Example: 289 // -config local,fakedist 290 // 291 // Error mode: 292 // 293 // -max-errors N stops testing after N errors have been 294 // encountered. Default 1. Set to 0 for no limit. 295 // 296 // -allow-prepare-fail 297 // tolerates / ignores errors that occur during query 298 // preparation. With -allow-prepare-fail you can 299 // indicate that it is OK as long as the database 300 // reports early to the client that it does not support 301 // a given query. Errors are still reported if queries 302 // fail during execution only or if a statement fails. 303 // 304 // -flex-types tolerates cases where a result column is produced with a 305 // different numeric type than the one expected by the 306 // test. This enables reusing tests designed for 307 // databases with slightly different typing semantics. 308 // 309 // Test output: 310 // 311 // -v (or -test.v if the test is compiled as a standalone 312 // binary). Go `testing`'s `verbose` flag. 313 // 314 // The output generated by the following flags is suppressed unless 315 // either -v is given or a test fails. 316 // 317 // -show-sql shows SQL statements/queries immediately before they 318 // are tested. This can be useful for example when 319 // troubleshooting errors that cause the database/test 320 // to stop before the test completes. When -show-sql 321 // is set, individual test results are annotated with 322 // either "OK" (test passed as expected), "XFAIL" 323 // (expected failure, test failed as expected), or 324 // "FAIL" to indicate an unexpected/undesired test 325 // failure. 326 // 327 // -error-summary produces a report organized by error message 328 // of all queries that have caused that error. Useful 329 // with -allow-prepare-fail and/or -flex-types. 330 // 331 // -full-messages by default -error-summary shortens error messages 332 // and queries so they fit in a moderately large 333 // terminal screen. With this parameter, the 334 // full text of errors and queries is printed. 335 // 336 // Suggested use: 337 // 338 // - For validation testing: just -d or -bigtest. 339 // - For compatibility testing: add -allow-prepare-fail -flex-types.
340 // - For troubleshooting / analysis: add -v -show-sql -error-summary. 341 342 var ( 343 resultsRE = regexp.MustCompile(`^(\d+)\s+values?\s+hashing\s+to\s+([0-9A-Fa-f]+)$`) 344 errorRE = regexp.MustCompile(`^(?:statement|query)\s+error\s+(?:pgcode\s+([[:alnum:]]+)\s+)?(.*)$`) 345 varRE = regexp.MustCompile(`\$[a-zA-Z][a-zA-Z_0-9]*`) 346 347 // Input selection 348 logictestdata = flag.String("d", "", "glob that selects subset of files to run") 349 bigtest = flag.Bool("bigtest", false, "enable the long-running SqlLiteLogic test") 350 overrideConfig = flag.String( 351 "config", "", 352 "sets the test cluster configuration; comma-separated values", 353 ) 354 355 // Testing mode 356 maxErrs = flag.Int( 357 "max-errors", 1, 358 "stop processing input files after this number of errors (set to 0 for no limit)", 359 ) 360 allowPrepareFail = flag.Bool( 361 "allow-prepare-fail", false, "tolerate unexpected errors when preparing a query", 362 ) 363 flexTypes = flag.Bool( 364 "flex-types", false, 365 "do not fail when a test expects a column of a numeric type but the query provides another type", 366 ) 367 368 // Output parameters 369 showSQL = flag.Bool("show-sql", false, 370 "print the individual SQL statement/queries before processing", 371 ) 372 printErrorSummary = flag.Bool("error-summary", false, 373 "print a per-error summary of failing queries at the end of testing, "+ 374 "when -allow-prepare-fail is set", 375 ) 376 fullMessages = flag.Bool("full-messages", false, 377 "do not shorten the error or SQL strings when printing the summary for -allow-prepare-fail "+ 378 "or -flex-types.", 379 ) 380 rewriteResultsInTestfiles = flag.Bool( 381 "rewrite", false, 382 "ignore the expected results and rewrite the test files with the actual results from this "+ 383 "run. Used to update tests when a change affects many cases; please verify the testfile "+ 384 "diffs carefully!", 385 ) 386 rewriteSQL = flag.Bool( 387 "rewrite-sql", false, 388 "pretty-reformat the SQL queries. Only use this incidentally when importing new tests. "+ 389 "beware! some tests INTEND to use non-formatted SQL queries (e.g. case sensitivity). "+ 390 "do not bluntly apply!", 391 ) 392 sqlfmtLen = flag.Int("line-length", tree.DefaultPrettyCfg().LineWidth, 393 "target line length when using -rewrite-sql") 394 disableOptRuleProbability = flag.Float64( 395 "disable-opt-rule-probability", 0, 396 "disable transformation rules in the cost-based optimizer with the given probability.") 397 optimizerCostPerturbation = flag.Float64( 398 "optimizer-cost-perturbation", 0, 399 "randomly perturb the estimated cost of each expression in the query tree by at most the "+ 400 "given fraction for the purpose of creating alternate query plans in the optimizer.") 401 ) 402 403 type testClusterConfig struct { 404 // name is the name of the config (used for subtest names). 405 name string 406 numNodes int 407 useFakeSpanResolver bool 408 // if non-empty, overrides the default distsql mode. 409 overrideDistSQLMode string 410 // if non-empty, overrides the default vectorize mode. 411 overrideVectorize string 412 // if non-empty, overrides the default automatic statistics mode. 413 overrideAutoStats string 414 // if set, queries using distSQL processors or vectorized operators that can 415 // fall back to disk do so immediately, using only their disk-based 416 // implementation. 417 sqlExecUseDisk bool 418 // if set, enables DistSQL metadata propagation tests. 
419 distSQLMetadataTestEnabled bool 420 // if set and the -test.short flag is passed, skip this config. 421 skipShort bool 422 // If not empty, bootstrapVersion controls what version the cluster will be 423 // bootstrapped at. 424 bootstrapVersion roachpb.Version 425 // If not empty, binaryVersion is used to set what the Server will consider 426 // to be the binary version. 427 binaryVersion roachpb.Version 428 disableUpgrade bool 429 // If true, a sql tenant server will be started and pointed at a node in the 430 // cluster. Connections on behalf of the logic test will go to that tenant. 431 useTenant bool 432 } 433 434 // logicTestConfigs contains all possible cluster configs. A test file can 435 // specify a list of configs they run on in a file-level comment like: 436 // # LogicTest: default distsql 437 // The test is run once on each configuration (in different subtests). 438 // If no configs are indicated, the default one is used (unless overridden 439 // via -config). 440 var logicTestConfigs = []testClusterConfig{ 441 { 442 name: "local", 443 numNodes: 1, 444 overrideDistSQLMode: "off", 445 overrideAutoStats: "false", 446 }, 447 { 448 name: "local-vec-off", 449 numNodes: 1, 450 overrideDistSQLMode: "off", 451 overrideAutoStats: "false", 452 overrideVectorize: "off", 453 }, 454 { 455 name: "local-v1.1@v1.0-noupgrade", 456 numNodes: 1, 457 overrideDistSQLMode: "off", 458 overrideAutoStats: "false", 459 bootstrapVersion: roachpb.Version{Major: 1}, 460 binaryVersion: roachpb.Version{Major: 1, Minor: 1}, 461 disableUpgrade: true, 462 }, 463 { 464 name: "local-vec-auto", 465 numNodes: 1, 466 overrideAutoStats: "false", 467 overrideVectorize: "201auto", 468 }, 469 { 470 name: "fakedist", 471 numNodes: 3, 472 useFakeSpanResolver: true, 473 overrideDistSQLMode: "on", 474 overrideAutoStats: "false", 475 }, 476 { 477 name: "local-mixed-19.2-20.1", 478 numNodes: 1, 479 overrideDistSQLMode: "off", 480 overrideAutoStats: "false", 481 bootstrapVersion: roachpb.Version{Major: 19, Minor: 2}, 482 binaryVersion: roachpb.Version{Major: 20, Minor: 1}, 483 disableUpgrade: true, 484 }, 485 { 486 name: "fakedist-vec-off", 487 numNodes: 3, 488 useFakeSpanResolver: true, 489 overrideDistSQLMode: "on", 490 overrideAutoStats: "false", 491 overrideVectorize: "off", 492 }, 493 { 494 name: "fakedist-vec-auto", 495 numNodes: 3, 496 useFakeSpanResolver: true, 497 overrideDistSQLMode: "on", 498 overrideAutoStats: "false", 499 overrideVectorize: "201auto", 500 }, 501 { 502 name: "fakedist-vec-auto-disk", 503 numNodes: 3, 504 useFakeSpanResolver: true, 505 overrideDistSQLMode: "on", 506 overrideAutoStats: "false", 507 overrideVectorize: "201auto", 508 sqlExecUseDisk: true, 509 skipShort: true, 510 }, 511 { 512 name: "fakedist-metadata", 513 numNodes: 3, 514 useFakeSpanResolver: true, 515 overrideDistSQLMode: "on", 516 overrideAutoStats: "false", 517 distSQLMetadataTestEnabled: true, 518 skipShort: true, 519 }, 520 { 521 name: "fakedist-disk", 522 numNodes: 3, 523 useFakeSpanResolver: true, 524 overrideDistSQLMode: "on", 525 overrideAutoStats: "false", 526 sqlExecUseDisk: true, 527 skipShort: true, 528 }, 529 { 530 name: "5node", 531 numNodes: 5, 532 overrideDistSQLMode: "on", 533 overrideAutoStats: "false", 534 }, 535 { 536 name: "5node-vec-auto", 537 numNodes: 5, 538 overrideDistSQLMode: "on", 539 overrideAutoStats: "false", 540 overrideVectorize: "201auto", 541 }, 542 { 543 name: "5node-vec-disk-auto", 544 numNodes: 5, 545 overrideDistSQLMode: "on", 546 overrideAutoStats: "false", 547 overrideVectorize: "201auto", 
548 sqlExecUseDisk: true, 549 skipShort: true, 550 }, 551 { 552 name: "5node-metadata", 553 numNodes: 5, 554 overrideDistSQLMode: "on", 555 overrideAutoStats: "false", 556 distSQLMetadataTestEnabled: true, 557 skipShort: true, 558 }, 559 { 560 name: "5node-disk", 561 numNodes: 5, 562 overrideDistSQLMode: "on", 563 overrideAutoStats: "false", 564 sqlExecUseDisk: true, 565 skipShort: true, 566 }, 567 { 568 name: "3node-tenant", 569 numNodes: 3, 570 // overrideAutoStats will disable automatic stats on the cluster this tenant 571 // is connected to. 572 overrideAutoStats: "false", 573 useTenant: true, 574 }, 575 } 576 577 var logicTestConfigIdxToName = make(map[logicTestConfigIdx]string) 578 579 func init() { 580 for i, cfg := range logicTestConfigs { 581 logicTestConfigIdxToName[logicTestConfigIdx(i)] = cfg.name 582 } 583 } 584 585 func parseTestConfig(names []string) []logicTestConfigIdx { 586 ret := make([]logicTestConfigIdx, len(names)) 587 for i, name := range names { 588 idx, ok := findLogicTestConfig(name) 589 if !ok { 590 panic(fmt.Errorf("unknown config %s", name)) 591 } 592 ret[i] = idx 593 } 594 return ret 595 } 596 597 var ( 598 // defaultConfigName is a special alias for the default configs. 599 defaultConfigName = "default-configs" 600 defaultConfigNames = []string{ 601 "local", 602 "local-vec-off", 603 "local-vec-auto", 604 "fakedist", 605 "fakedist-vec-off", 606 "fakedist-vec-auto", 607 "fakedist-vec-auto-disk", 608 "fakedist-metadata", 609 "fakedist-disk", 610 "3node-tenant", 611 } 612 // fiveNodeDefaultConfigName is a special alias for all 5 node configs. 613 fiveNodeDefaultConfigName = "5node-default-configs" 614 fiveNodeDefaultConfigNames = []string{ 615 "5node", 616 "5node-vec-auto", 617 "5node-vec-disk-auto", 618 "5node-metadata", 619 "5node-disk", 620 } 621 defaultConfig = parseTestConfig(defaultConfigNames) 622 fiveNodeDefaultConfig = parseTestConfig(fiveNodeDefaultConfigNames) 623 ) 624 625 // An index in the above slice. 626 type logicTestConfigIdx int 627 628 func findLogicTestConfig(name string) (logicTestConfigIdx, bool) { 629 for i, cfg := range logicTestConfigs { 630 if cfg.name == name { 631 return logicTestConfigIdx(i), true 632 } 633 } 634 return -1, false 635 } 636 637 // lineScanner handles reading from input test files. 638 type lineScanner struct { 639 *bufio.Scanner 640 line int 641 skip bool 642 } 643 644 func newLineScanner(r io.Reader) *lineScanner { 645 return &lineScanner{ 646 Scanner: bufio.NewScanner(r), 647 line: 0, 648 } 649 } 650 651 func (l *lineScanner) Scan() bool { 652 ok := l.Scanner.Scan() 653 if ok { 654 l.line++ 655 } 656 return ok 657 } 658 659 func (l *lineScanner) Text() string { 660 return l.Scanner.Text() 661 } 662 663 // logicStatement represents a single statement test in Test-Script. 664 type logicStatement struct { 665 // file and line number of the test. 666 pos string 667 // SQL string to be sent to the database. 668 sql string 669 // expected error, if any. "" indicates the statement should 670 // succeed. 671 expectErr string 672 // expected pgcode for the error, if any. "" indicates the 673 // test does not check the pgwire error code. 674 expectErrCode string 675 // expected rows affected count. -1 to avoid testing this. 676 expectCount int64 677 } 678 679 // readSQL reads the lines of a SQL statement or query until the first blank 680 // line or (optionally) a "----" separator, and sets stmt.sql. 681 // 682 // If a separator is found, returns separator=true. 
If a separator is found when 683 // it is not expected, returns an error. 684 func (ls *logicStatement) readSQL( 685 t *logicTest, s *lineScanner, allowSeparator bool, 686 ) (separator bool, _ error) { 687 var buf bytes.Buffer 688 hasVars := false 689 for s.Scan() { 690 line := s.Text() 691 if !*rewriteSQL { 692 t.emit(line) 693 } 694 substLine := t.substituteVars(line) 695 if line != substLine { 696 hasVars = true 697 line = substLine 698 } 699 if line == "" { 700 break 701 } 702 if line == "----" { 703 separator = true 704 if ls.expectErr != "" { 705 return false, errors.Errorf( 706 "%s: invalid ---- separator after a statement or query expecting an error: %s", 707 ls.pos, ls.expectErr, 708 ) 709 } 710 if !allowSeparator { 711 return false, errors.Errorf("%s: unexpected ---- separator", ls.pos) 712 } 713 break 714 } 715 fmt.Fprintln(&buf, line) 716 } 717 ls.sql = strings.TrimSpace(buf.String()) 718 if *rewriteSQL { 719 if !hasVars { 720 newSyntax, err := func(inSql string) (string, error) { 721 // Can't rewrite the SQL otherwise because the vars make it invalid. 722 stmtList, err := parser.Parse(inSql) 723 if err != nil { 724 if ls.expectErr != "" { 725 // Maybe a parse error was expected. Simply do not rewrite. 726 return inSql, nil 727 } 728 return "", errors.Wrapf(err, "%s: error while parsing SQL for reformat:\n%s", ls.pos, ls.sql) 729 } 730 var newSyntax bytes.Buffer 731 pcfg := tree.DefaultPrettyCfg() 732 pcfg.LineWidth = *sqlfmtLen 733 pcfg.Simplify = false 734 pcfg.UseTabs = false 735 for i := range stmtList { 736 if i > 0 { 737 fmt.Fprintln(&newSyntax, ";") 738 } 739 fmt.Fprint(&newSyntax, pcfg.Pretty(stmtList[i].AST)) 740 } 741 return newSyntax.String(), nil 742 }(ls.sql) 743 if err != nil { 744 return false, err 745 } 746 ls.sql = newSyntax 747 } 748 749 t.emit(ls.sql) 750 if separator { 751 t.emit("----") 752 } else { 753 t.emit("") 754 } 755 } 756 return separator, nil 757 } 758 759 // logicSorter sorts result rows (or not) depending on Test-Script's 760 // sorting option for a "query" test. See the implementation of the 761 // "query" directive below for details. 762 type logicSorter func(numCols int, values []string) 763 764 type rowSorter struct { 765 numCols int 766 numRows int 767 values []string 768 } 769 770 func (r rowSorter) row(i int) []string { 771 return r.values[i*r.numCols : (i+1)*r.numCols] 772 } 773 774 func (r rowSorter) Len() int { 775 return r.numRows 776 } 777 778 func (r rowSorter) Less(i, j int) bool { 779 a := r.row(i) 780 b := r.row(j) 781 for k := range a { 782 if a[k] < b[k] { 783 return true 784 } 785 if a[k] > b[k] { 786 return false 787 } 788 } 789 return false 790 } 791 792 func (r rowSorter) Swap(i, j int) { 793 a := r.row(i) 794 b := r.row(j) 795 for i := range a { 796 a[i], b[i] = b[i], a[i] 797 } 798 } 799 800 func rowSort(numCols int, values []string) { 801 sort.Sort(rowSorter{ 802 numCols: numCols, 803 numRows: len(values) / numCols, 804 values: values, 805 }) 806 } 807 808 func valueSort(numCols int, values []string) { 809 sort.Strings(values) 810 } 811 812 // partialSort rearranges consecutive rows that have the same values on a 813 // certain set of columns (orderedCols). 814 // 815 // More specifically: rows are partitioned into groups of consecutive rows that 816 // have the same values for columns orderedCols. Inside each group, the rows are 817 // sorted. The relative order of any two rows that differ on orderedCols is 818 // preserved. 
819 // 820 // This is useful when comparing results for a statement that guarantees a 821 // partial, but not a total order. Consider: 822 // 823 // SELECT a, b FROM ab ORDER BY a 824 // 825 // Some possible outputs for the same data: 826 // 1 2 1 5 1 2 827 // 1 5 1 4 1 4 828 // 1 4 or 1 2 or 1 5 829 // 2 3 2 2 2 3 830 // 2 2 2 3 2 2 831 // 832 // After a partialSort with orderedCols = {0} all become: 833 // 1 2 834 // 1 4 835 // 1 5 836 // 2 2 837 // 2 3 838 // 839 // An incorrect output like: 840 // 1 5 1 2 841 // 1 2 1 5 842 // 2 3 becomes: 2 2 843 // 2 2 2 3 844 // 1 4 1 4 845 // and it is detected as different. 846 func partialSort(numCols int, orderedCols []int, values []string) { 847 // We use rowSorter here only as a container. 848 c := rowSorter{ 849 numCols: numCols, 850 numRows: len(values) / numCols, 851 values: values, 852 } 853 854 // Sort the group of rows [rowStart, rowEnd). 855 sortGroup := func(rowStart, rowEnd int) { 856 sort.Sort(rowSorter{ 857 numCols: numCols, 858 numRows: rowEnd - rowStart, 859 values: values[rowStart*numCols : rowEnd*numCols], 860 }) 861 } 862 863 groupStart := 0 864 for rIdx := 1; rIdx < c.numRows; rIdx++ { 865 // See if this row belongs in the group with the previous row. 866 row := c.row(rIdx) 867 start := c.row(groupStart) 868 differs := false 869 for _, i := range orderedCols { 870 if start[i] != row[i] { 871 differs = true 872 break 873 } 874 } 875 if differs { 876 // Sort the group and start a new group with just this row in it. 877 sortGroup(groupStart, rIdx) 878 groupStart = rIdx 879 } 880 } 881 sortGroup(groupStart, c.numRows) 882 } 883 884 // logicQuery represents a single query test in Test-Script. 885 type logicQuery struct { 886 logicStatement 887 888 // colTypes indicates the expected result column types. 889 colTypes string 890 // colNames controls the inclusion of column names in the query result. 891 colNames bool 892 // retry indicates if the query should be retried in case of failure with 893 // exponential backoff up to some maximum duration. 894 retry bool 895 // some tests require the output to match modulo sorting. 896 sorter logicSorter 897 // expectedErr and expectedErrCode are as in logicStatement. 898 899 // if set, the results are cross-checked against previous queries with the 900 // same label. 901 label string 902 903 checkResults bool 904 // valsPerLine is the number of values included in each line of the expected 905 // results. This can either be 1, or else it must match the number of expected 906 // columns (i.e. len(colTypes)). 907 valsPerLine int 908 // expectedResults indicates the expected sequence of text words 909 // when flattening a query's results. 910 expectedResults []string 911 // expectedResultsRaw is the same as expectedResults, but 912 // retaining the original formatting (whitespace, indentation) as 913 // the test input file. This is used for pretty-printing unexpected 914 // results. 915 expectedResultsRaw []string 916 // expectedHash indicates the expected hash of all result rows 917 // combined. "" indicates hash checking is disabled. 918 expectedHash string 919 920 // expectedValues indicates the number of rows expected when 921 // expectedHash is set. 922 expectedValues int 923 924 // kvtrace indicates that we're comparing the output of a kv trace. 925 kvtrace bool 926 // kvOpTypes can be used only when kvtrace is true. It contains 927 // the particular operation types to filter on, such as CPut or Del. 
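// For example (illustrative only), the query option
// kvtrace(CPut,Del,prefix=/Table/54) described in the file-level comment
// above would be parsed into kvOpTypes = {"CPut", "Del"} and
// keyPrefixFilters = {"/Table/54"}.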
928 kvOpTypes []string 929 keyPrefixFilters []string 930 931 // noticetrace indicates we're comparing the output of a notice trace. 932 noticetrace bool 933 934 // rawOpts are the query options, before parsing. Used to display in error 935 // messages. 936 rawOpts string 937 } 938 939 var allowedKVOpTypes = []string{ 940 "CPut", 941 "Put", 942 "InitPut", 943 "Del", 944 "ClearRange", 945 "Get", 946 "Scan", 947 "FKScan", 948 "CascadeScan", 949 } 950 951 func isAllowedKVOp(op string) bool { 952 for _, s := range allowedKVOpTypes { 953 if op == s { 954 return true 955 } 956 } 957 return false 958 } 959 960 // logicTest executes the test cases specified in a file. The file format is 961 // taken from the sqllogictest tool 962 // (http://www.sqlite.org/sqllogictest/doc/trunk/about.wiki) with various 963 // extensions to allow specifying errors and additional options. See 964 // https://github.com/gregrahn/sqllogictest/ for a github mirror of the 965 // sqllogictest source. 966 type logicTest struct { 967 rootT *testing.T 968 subtestT *testing.T 969 rng *rand.Rand 970 cfg testClusterConfig 971 // the number of nodes in the cluster. 972 cluster serverutils.TestClusterInterface 973 // the index of the node (within the cluster) against which we run the test 974 // statements. 975 nodeIdx int 976 // If this test uses a SQL tenant server, this is its address. In this case, 977 // all clients are created against this tenant. 978 tenantAddr string 979 // map of built clients. Needs to be persisted so that we can 980 // re-use them and close them all on exit. 981 clients map[string]*gosql.DB 982 // client currently in use. This can change during processing 983 // of a test input file when encountering the "user" directive. 984 // see setUser() for details. 985 user string 986 db *gosql.DB 987 cleanupFuncs []func() 988 // progress holds the number of tests executed so far. 989 progress int 990 // failures holds the number of tests failed so far, when 991 // -try-harder is set. 992 failures int 993 // unsupported holds the number of queries ignored due 994 // to prepare errors, when -allow-prepare-fail is set. 995 unsupported int 996 // lastProgress is used for the progress indicator message. 997 lastProgress time.Time 998 // traceFile holds the current trace file between "traceon" 999 // and "traceoff" directives. 1000 traceFile *os.File 1001 // verbose indicate whether -v was passed. 1002 verbose bool 1003 // perErrorSummary retains the per-error list of failing queries 1004 // when -error-summary is set. 1005 perErrorSummary map[string][]string 1006 // labelMap retains the expected result hashes that have 1007 // been marked using a result label in the input. See the 1008 // explanation for labels in processInputFiles(). 1009 labelMap map[string]string 1010 1011 // varMap remembers the variables set with "let". 1012 varMap map[string]string 1013 1014 // noticeBuffer retains the notices from the past query. 1015 noticeBuffer []string 1016 1017 rewriteResTestBuf bytes.Buffer 1018 1019 curPath string 1020 curLineNo int 1021 1022 // randomizedVectorizedBatchSize stores the randomized batch size for 1023 // vectorized engine if it is not turned off. The batch size will randomly be 1024 // set to 1 with 25% probability, {2, 3} with 25% probability or default batch 1025 // size with 50% probability. 
1026 randomizedVectorizedBatchSize int 1027 } 1028 1029 func (t *logicTest) t() *testing.T { 1030 if t.subtestT != nil { 1031 return t.subtestT 1032 } 1033 return t.rootT 1034 } 1035 1036 func (t *logicTest) traceStart(filename string) { 1037 if t.traceFile != nil { 1038 t.Fatalf("tracing already active") 1039 } 1040 var err error 1041 t.traceFile, err = os.Create(filename) 1042 if err != nil { 1043 t.Fatalf("unable to open trace output file: %s", err) 1044 } 1045 if err := trace.Start(t.traceFile); err != nil { 1046 t.Fatalf("unable to start tracing: %s", err) 1047 } 1048 } 1049 1050 func (t *logicTest) traceStop() { 1051 if t.traceFile != nil { 1052 trace.Stop() 1053 t.traceFile.Close() 1054 t.traceFile = nil 1055 } 1056 } 1057 1058 // substituteVars replaces all occurrences of "$abc", where "abc" is a variable 1059 // previously defined by a let, with the value of that variable. 1060 func (t *logicTest) substituteVars(line string) string { 1061 if len(t.varMap) == 0 { 1062 return line 1063 } 1064 1065 // See if there are any $vars to replace. 1066 return varRE.ReplaceAllStringFunc(line, func(varName string) string { 1067 if replace, ok := t.varMap[varName]; ok { 1068 return replace 1069 } 1070 return line 1071 }) 1072 } 1073 1074 // emit is used for the --generate-testfiles mode; it emits a line of testfile. 1075 func (t *logicTest) emit(line string) { 1076 if *rewriteResultsInTestfiles || *rewriteSQL { 1077 t.rewriteResTestBuf.WriteString(line) 1078 t.rewriteResTestBuf.WriteString("\n") 1079 } 1080 } 1081 1082 func (t *logicTest) close() { 1083 t.traceStop() 1084 1085 for _, cleanup := range t.cleanupFuncs { 1086 cleanup() 1087 } 1088 t.cleanupFuncs = nil 1089 1090 if t.cluster != nil { 1091 t.cluster.Stopper().Stop(context.TODO()) 1092 t.cluster = nil 1093 } 1094 if t.clients != nil { 1095 for _, c := range t.clients { 1096 c.Close() 1097 } 1098 t.clients = nil 1099 } 1100 t.db = nil 1101 } 1102 1103 // out emits a message both on stdout and the log files if 1104 // verbose is set. 1105 func (t *logicTest) outf(format string, args ...interface{}) { 1106 if t.verbose { 1107 fmt.Printf(format, args...) 1108 fmt.Println() 1109 log.Infof(context.Background(), format, args...) 1110 } 1111 } 1112 1113 // setUser sets the DB client to the specified user. 1114 // It returns a cleanup function to be run when the credentials 1115 // are no longer needed. 1116 func (t *logicTest) setUser(user string) func() { 1117 if t.clients == nil { 1118 t.clients = map[string]*gosql.DB{} 1119 } 1120 if db, ok := t.clients[user]; ok { 1121 t.db = db 1122 t.user = user 1123 1124 // No cleanup necessary, but return a no-op func to avoid nil pointer dereference. 1125 return func() {} 1126 } 1127 1128 addr := t.tenantAddr 1129 if addr == "" { 1130 addr = t.cluster.Server(t.nodeIdx).ServingSQLAddr() 1131 } 1132 pgURL, cleanupFunc := sqlutils.PGUrl(t.rootT, addr, "TestLogic", url.User(user)) 1133 pgURL.Path = "test" 1134 1135 base, err := pq.NewConnector(pgURL.String()) 1136 if err != nil { 1137 t.Fatal(err) 1138 } 1139 1140 connector := pq.ConnectorWithNoticeHandler(base, func(notice *pq.Error) { 1141 t.noticeBuffer = append(t.noticeBuffer, notice.Severity+": "+notice.Message) 1142 if notice.Detail != "" { 1143 t.noticeBuffer = append(t.noticeBuffer, "DETAIL: "+notice.Detail) 1144 } 1145 if notice.Hint != "" { 1146 t.noticeBuffer = append(t.noticeBuffer, "HINT: "+notice.Hint) 1147 } 1148 }) 1149 db := gosql.OpenDB(connector) 1150 1151 // The default value for extra_float_digits assumed by tests is 1152 // 0. 
However, lib/pq by default configures this to 2 during 1153 // connection initialization, so we need to set it back to 0 before 1154 // we run anything. 1155 if _, err := db.Exec("SET extra_float_digits = 0"); err != nil { 1156 t.Fatal(err) 1157 } 1158 t.clients[user] = db 1159 t.db = db 1160 t.user = user 1161 1162 return cleanupFunc 1163 } 1164 1165 func (t *logicTest) setup(cfg testClusterConfig, serverArgs TestServerArgs) { 1166 t.cfg = cfg 1167 // TODO(pmattis): Add a flag to make it easy to run the tests against a local 1168 // MySQL or Postgres instance. 1169 // TODO(andrei): if createTestServerParams() is used here, the command filter 1170 // it installs detects a transaction that doesn't have 1171 // modifiedSystemConfigSpan set even though it should, for 1172 // "testdata/rename_table". Figure out what's up with that. 1173 var tempStorageConfig base.TempStorageConfig 1174 if serverArgs.tempStorageDiskLimit == 0 { 1175 tempStorageConfig = base.DefaultTestTempStorageConfig(cluster.MakeTestingClusterSettings()) 1176 } else { 1177 tempStorageConfig = base.DefaultTestTempStorageConfigWithSize(cluster.MakeTestingClusterSettings(), serverArgs.tempStorageDiskLimit) 1178 } 1179 params := base.TestClusterArgs{ 1180 ServerArgs: base.TestServerArgs{ 1181 // Specify a fixed memory limit (some test cases verify OOM conditions; we 1182 // don't want those to take long on large machines). 1183 SQLMemoryPoolSize: 192 * 1024 * 1024, 1184 TempStorageConfig: tempStorageConfig, 1185 Knobs: base.TestingKnobs{ 1186 Store: &kvserver.StoreTestingKnobs{ 1187 // The consistency queue makes a lot of noisy logs during logic tests. 1188 DisableConsistencyQueue: true, 1189 }, 1190 SQLEvalContext: &tree.EvalContextTestingKnobs{ 1191 AssertBinaryExprReturnTypes: true, 1192 AssertUnaryExprReturnTypes: true, 1193 AssertFuncExprReturnTypes: true, 1194 DisableOptimizerRuleProbability: *disableOptRuleProbability, 1195 OptimizerCostPerturbation: *optimizerCostPerturbation, 1196 }, 1197 }, 1198 ClusterName: "testclustername", 1199 UseDatabase: "test", 1200 }, 1201 // For distributed SQL tests, we use the fake span resolver; it doesn't 1202 // matter where the data really is. 1203 ReplicationMode: base.ReplicationManual, 1204 } 1205 1206 distSQLKnobs := &execinfra.TestingKnobs{ 1207 MetadataTestLevel: execinfra.Off, DeterministicStats: true, 1208 } 1209 if cfg.sqlExecUseDisk { 1210 distSQLKnobs.ForceDiskSpill = true 1211 } 1212 if cfg.distSQLMetadataTestEnabled { 1213 distSQLKnobs.MetadataTestLevel = execinfra.On 1214 } 1215 if strings.Compare(cfg.overrideVectorize, "off") != 0 { 1216 distSQLKnobs.EnableVectorizedInvariantsChecker = true 1217 } 1218 params.ServerArgs.Knobs.DistSQL = distSQLKnobs 1219 if cfg.bootstrapVersion != (roachpb.Version{}) { 1220 if params.ServerArgs.Knobs.Server == nil { 1221 params.ServerArgs.Knobs.Server = &server.TestingKnobs{} 1222 } 1223 params.ServerArgs.Knobs.Server.(*server.TestingKnobs).BootstrapVersionOverride = cfg.bootstrapVersion 1224 } 1225 if cfg.disableUpgrade { 1226 if params.ServerArgs.Knobs.Server == nil { 1227 params.ServerArgs.Knobs.Server = &server.TestingKnobs{} 1228 } 1229 params.ServerArgs.Knobs.Server.(*server.TestingKnobs).DisableAutomaticVersionUpgrade = 1 1230 } 1231 1232 if cfg.binaryVersion != (roachpb.Version{}) { 1233 // If we want to run a specific server version, we assume that it 1234 // supports at least the bootstrap version. 
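// For example, under the local-mixed-19.2-20.1 config above, every node
// gets settings whose binary version is 20.1 and whose minimum supported
// version is 19.2 (the bootstrap version).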
1235 paramsPerNode := map[int]base.TestServerArgs{} 1236 binaryMinSupportedVersion := cfg.binaryVersion 1237 if cfg.bootstrapVersion != (roachpb.Version{}) { 1238 binaryMinSupportedVersion = cfg.bootstrapVersion 1239 } 1240 for i := 0; i < cfg.numNodes; i++ { 1241 nodeParams := params.ServerArgs 1242 nodeParams.Settings = cluster.MakeTestingClusterSettingsWithVersions( 1243 cfg.binaryVersion, binaryMinSupportedVersion, false /* initializeVersion */) 1244 paramsPerNode[i] = nodeParams 1245 } 1246 params.ServerArgsPerNode = paramsPerNode 1247 } 1248 1249 // Update the defaults for automatic statistics to avoid delays in testing. 1250 // Avoid making the DefaultAsOfTime too small to avoid interacting with 1251 // schema changes and causing transaction retries. 1252 // TODO(radu): replace these with testing knobs. 1253 stats.DefaultAsOfTime = 10 * time.Millisecond 1254 stats.DefaultRefreshInterval = time.Millisecond 1255 1256 t.cluster = serverutils.StartTestCluster(t.rootT, cfg.numNodes, params) 1257 if cfg.useFakeSpanResolver { 1258 fakeResolver := physicalplanutils.FakeResolverForTestCluster(t.cluster) 1259 t.cluster.Server(t.nodeIdx).SetDistSQLSpanResolver(fakeResolver) 1260 } 1261 1262 connsForClusterSettingChanges := []*gosql.DB{t.cluster.ServerConn(0)} 1263 if cfg.useTenant { 1264 var err error 1265 t.tenantAddr, err = t.cluster.Server(t.nodeIdx).StartTenant(base.TestTenantArgs{TenantID: roachpb.MakeTenantID(10), AllowSettingClusterSettings: true}) 1266 if err != nil { 1267 t.rootT.Fatalf("%+v", err) 1268 } 1269 1270 // Open a connection to this tenant to set any cluster settings specified 1271 // by the test config. 1272 pgURL, cleanup := sqlutils.PGUrl(t.rootT, t.tenantAddr, "Tenant", url.User(security.RootUser)) 1273 defer cleanup() 1274 if params.ServerArgs.Insecure { 1275 pgURL.RawQuery = "sslmode=disable" 1276 } 1277 db, err := gosql.Open("postgres", pgURL.String()) 1278 if err != nil { 1279 t.rootT.Fatal(err) 1280 } 1281 defer db.Close() 1282 connsForClusterSettingChanges = append(connsForClusterSettingChanges, db) 1283 } 1284 1285 // Set cluster settings. 1286 for _, conn := range connsForClusterSettingChanges { 1287 if _, err := conn.Exec( 1288 "SET CLUSTER SETTING sql.stats.automatic_collection.min_stale_rows = $1::int", 5, 1289 ); err != nil { 1290 t.Fatal(err) 1291 } 1292 1293 if cfg.overrideDistSQLMode != "" { 1294 if _, err := conn.Exec( 1295 "SET CLUSTER SETTING sql.defaults.distsql = $1::string", cfg.overrideDistSQLMode, 1296 ); err != nil { 1297 t.Fatal(err) 1298 } 1299 } 1300 1301 if cfg.overrideVectorize != "" { 1302 if _, err := conn.Exec( 1303 "SET CLUSTER SETTING sql.defaults.vectorize = $1::string", cfg.overrideVectorize, 1304 ); err != nil { 1305 t.Fatal(err) 1306 } 1307 } 1308 1309 // Always override the vectorize row count threshold. This runs all supported 1310 // queries (relative to the mode) through the vectorized execution engine. 
1311 if _, err := conn.Exec( 1312 "SET CLUSTER SETTING sql.defaults.vectorize_row_count_threshold = 0", 1313 ); err != nil { 1314 t.Fatal(err) 1315 } 1316 1317 if _, err := conn.Exec( 1318 fmt.Sprintf("SET CLUSTER SETTING sql.testing.vectorize.batch_size to %d", 1319 t.randomizedVectorizedBatchSize), 1320 ); err != nil { 1321 t.Fatal(err) 1322 } 1323 1324 if cfg.overrideAutoStats != "" { 1325 if _, err := conn.Exec( 1326 "SET CLUSTER SETTING sql.stats.automatic_collection.enabled = $1::bool", cfg.overrideAutoStats, 1327 ); err != nil { 1328 t.Fatal(err) 1329 } 1330 } else { 1331 // Background stats collection is enabled by default, but we've seen tests 1332 // flake with it on. When the issue manifests, it seems to be around a 1333 // schema change transaction getting pushed, which causes it to increment a 1334 // table ID twice instead of once, causing non-determinism. 1335 // 1336 // In the short term, we disable auto stats by default to avoid the flakes. 1337 // 1338 // In the long run, these tests should be running with default settings as 1339 // much as possible, so we likely want to address this. Two options are 1340 // either making schema changes more resilient to being pushed or possibly 1341 // making auto stats avoid pushing schema change transactions. There might 1342 // be other better alternatives than these. 1343 // 1344 // See #37751 for details. 1345 if _, err := conn.Exec( 1346 "SET CLUSTER SETTING sql.stats.automatic_collection.enabled = false", 1347 ); err != nil { 1348 t.Fatal(err) 1349 } 1350 } 1351 } 1352 1353 if cfg.overrideDistSQLMode != "" { 1354 _, ok := sessiondata.DistSQLExecModeFromString(cfg.overrideDistSQLMode) 1355 if !ok { 1356 t.Fatalf("invalid distsql mode override: %s", cfg.overrideDistSQLMode) 1357 } 1358 // Wait until all servers are aware of the setting. 1359 testutils.SucceedsSoon(t.rootT, func() error { 1360 for i := 0; i < t.cluster.NumServers(); i++ { 1361 var m string 1362 err := t.cluster.ServerConn(i % t.cluster.NumServers()).QueryRow( 1363 "SHOW CLUSTER SETTING sql.defaults.distsql", 1364 ).Scan(&m) 1365 if err != nil { 1366 t.Fatal(errors.Wrapf(err, "%d", i)) 1367 } 1368 if m != cfg.overrideDistSQLMode { 1369 return errors.Errorf("node %d is still waiting for update of DistSQLMode to %s (have %s)", 1370 i, cfg.overrideDistSQLMode, m, 1371 ) 1372 } 1373 } 1374 return nil 1375 }) 1376 } 1377 1378 // db may change over the lifetime of this function, with intermediate 1379 // values cached in t.clients and finally closed in t.close(). 1380 t.cleanupFuncs = append(t.cleanupFuncs, t.setUser(security.RootUser)) 1381 1382 if _, err := t.db.Exec(` 1383 CREATE DATABASE test; 1384 `); err != nil { 1385 t.Fatal(err) 1386 } 1387 1388 if _, err := t.db.Exec(fmt.Sprintf("CREATE USER %s;", server.TestUser)); err != nil { 1389 t.Fatal(err) 1390 } 1391 1392 t.labelMap = make(map[string]string) 1393 t.varMap = make(map[string]string) 1394 1395 t.progress = 0 1396 t.failures = 0 1397 t.unsupported = 0 1398 } 1399 1400 // applyBlacklistToConfigIdxs applies the given blacklist to config idxs, 1401 // returning the result. 
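// For example, for a directive like "# LogicTest: default-configs !3node-tenant",
// this is called with the default config idxs and a blacklist containing
// "3node-tenant", and it returns every default config except 3node-tenant.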
1402 func applyBlacklistToConfigIdxs( 1403 configIdxs []logicTestConfigIdx, blacklist map[string]struct{}, 1404 ) []logicTestConfigIdx { 1405 if len(blacklist) == 0 { 1406 return configIdxs 1407 } 1408 var newConfigIdxs []logicTestConfigIdx 1409 for _, idx := range configIdxs { 1410 if _, ok := blacklist[logicTestConfigIdxToName[idx]]; ok { 1411 continue 1412 } 1413 newConfigIdxs = append(newConfigIdxs, idx) 1414 } 1415 return newConfigIdxs 1416 } 1417 1418 // processConfigs, given a list of configNames, returns the list of 1419 // corresponding logicTestConfigIdxs. 1420 func processConfigs(t *testing.T, path string, configNames []string) []logicTestConfigIdx { 1421 const blacklistChar = '!' 1422 blacklist := make(map[string]struct{}) 1423 allConfigNamesAreBlacklistDirectives := true 1424 for _, configName := range configNames { 1425 if configName[0] != blacklistChar { 1426 allConfigNamesAreBlacklistDirectives = false 1427 continue 1428 } 1429 blacklist[configName[1:]] = struct{}{} 1430 } 1431 1432 var configs []logicTestConfigIdx 1433 if len(blacklist) != 0 && allConfigNamesAreBlacklistDirectives { 1434 // No configs specified, this blacklist applies to the default config. 1435 return applyBlacklistToConfigIdxs(defaultConfig, blacklist) 1436 } 1437 1438 for _, configName := range configNames { 1439 if configName[0] == blacklistChar { 1440 continue 1441 } 1442 if _, ok := blacklist[configName]; ok { 1443 continue 1444 } 1445 1446 idx, ok := findLogicTestConfig(configName) 1447 if !ok { 1448 switch configName { 1449 case defaultConfigName: 1450 configs = append(configs, applyBlacklistToConfigIdxs(defaultConfig, blacklist)...) 1451 case fiveNodeDefaultConfigName: 1452 configs = append(configs, applyBlacklistToConfigIdxs(fiveNodeDefaultConfig, blacklist)...) 1453 default: 1454 t.Fatalf("%s: unknown config name %s", path, configName) 1455 } 1456 } else { 1457 configs = append(configs, idx) 1458 } 1459 } 1460 1461 return configs 1462 } 1463 1464 // readTestFileConfigs reads any LogicTest directive at the beginning of a 1465 // test file. A line that starts with "# LogicTest:" specifies a list of 1466 // configuration names. The test file is run against each of those 1467 // configurations. 1468 // 1469 // Example: 1470 // # LogicTest: default distsql 1471 // 1472 // If the file doesn't contain a directive, the default config is returned. 1473 func readTestFileConfigs(t *testing.T, path string) []logicTestConfigIdx { 1474 file, err := os.Open(path) 1475 if err != nil { 1476 t.Fatal(err) 1477 } 1478 defer file.Close() 1479 1480 s := newLineScanner(file) 1481 for s.Scan() { 1482 fields := strings.Fields(s.Text()) 1483 if len(fields) == 0 { 1484 continue 1485 } 1486 cmd := fields[0] 1487 if !strings.HasPrefix(cmd, "#") { 1488 // Stop at the first line that's not a comment (or empty). 1489 break 1490 } 1491 // Directive lines are of the form: 1492 // # LogicTest: opt1=val1 opt2=val3 boolopt1 1493 if len(fields) > 1 && cmd == "#" && fields[1] == "LogicTest:" { 1494 if len(fields) == 2 { 1495 t.Fatalf("%s: empty LogicTest directive", path) 1496 } 1497 return processConfigs(t, path, fields[2:]) 1498 } 1499 } 1500 // No directive found, return the default config. 
1501 return defaultConfig 1502 } 1503 1504 type subtestDetails struct { 1505 name string // the subtest's name, empty if not a subtest 1506 buffer *bytes.Buffer // a chunk of the test file representing the subtest 1507 lineLineIndexIntoFile int // the line number of the test file where the subtest started 1508 } 1509 1510 func (t *logicTest) processTestFile(path string, config testClusterConfig) error { 1511 subtests, err := fetchSubtests(path) 1512 if err != nil { 1513 return err 1514 } 1515 1516 if *showSQL { 1517 t.outf("--- queries start here (file: %s)", path) 1518 } 1519 defer t.printCompletion(path, config) 1520 1521 for _, subtest := range subtests { 1522 if *maxErrs > 0 && t.failures >= *maxErrs { 1523 break 1524 } 1525 // If subtest has no name, then it is not a subtest, so just run the lines 1526 // in the overall test. Note that this can only happen in the first subtest. 1527 if len(subtest.name) == 0 { 1528 if err := t.processSubtest(subtest, path, config); err != nil { 1529 return err 1530 } 1531 } else { 1532 t.emit(fmt.Sprintf("subtest %s", subtest.name)) 1533 t.rootT.Run(subtest.name, func(subtestT *testing.T) { 1534 t.subtestT = subtestT 1535 defer func() { 1536 t.subtestT = nil 1537 }() 1538 if err := t.processSubtest(subtest, path, config); err != nil { 1539 t.Error(err) 1540 } 1541 }) 1542 } 1543 } 1544 1545 if (*rewriteResultsInTestfiles || *rewriteSQL) && !t.rootT.Failed() { 1546 // Rewrite the test file. 1547 file, err := os.Create(path) 1548 if err != nil { 1549 return err 1550 } 1551 defer file.Close() 1552 // Remove any trailing blank line. 1553 data := t.rewriteResTestBuf.String() 1554 if l := len(data); l > 2 && data[l-1] == '\n' && data[l-2] == '\n' { 1555 data = data[:l-1] 1556 } 1557 fmt.Fprint(file, data) 1558 } 1559 1560 return nil 1561 } 1562 1563 // fetchSubtests reads through the test file and splices it into subtest chunks. 1564 // If there is no subtest, the output will only contain a single entry. 
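// For example, a file consisting of a query, a "subtest foo" line, and a
// second query yields two entries: an unnamed chunk holding the first query
// and a chunk named "foo" holding the second.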
1565 func fetchSubtests(path string) ([]subtestDetails, error) { 1566 file, err := os.Open(path) 1567 if err != nil { 1568 return nil, err 1569 } 1570 defer file.Close() 1571 1572 s := newLineScanner(file) 1573 var subtests []subtestDetails 1574 var curName string 1575 var curLineIndexIntoFile int 1576 buffer := &bytes.Buffer{} 1577 for s.Scan() { 1578 line := s.Text() 1579 fields := strings.Fields(line) 1580 if len(fields) > 0 && fields[0] == "subtest" { 1581 if len(fields) != 2 { 1582 return nil, errors.Errorf( 1583 "%s:%d expected only one field following the subtest command\n"+ 1584 "Note that this check does not respect the other commands so if a query result has a "+ 1585 "line that starts with \"subtest\" it will either fail or be split into a subtest.", 1586 path, s.line, 1587 ) 1588 } 1589 subtests = append(subtests, subtestDetails{ 1590 name: curName, 1591 buffer: buffer, 1592 lineLineIndexIntoFile: curLineIndexIntoFile, 1593 }) 1594 buffer = &bytes.Buffer{} 1595 curName = fields[1] 1596 curLineIndexIntoFile = s.line + 1 1597 } else { 1598 buffer.WriteString(line) 1599 buffer.WriteRune('\n') 1600 } 1601 } 1602 subtests = append(subtests, subtestDetails{ 1603 name: curName, 1604 buffer: buffer, 1605 lineLineIndexIntoFile: curLineIndexIntoFile, 1606 }) 1607 1608 return subtests, nil 1609 } 1610 1611 func (t *logicTest) processSubtest( 1612 subtest subtestDetails, path string, config testClusterConfig, 1613 ) error { 1614 defer t.traceStop() 1615 1616 s := newLineScanner(subtest.buffer) 1617 t.lastProgress = timeutil.Now() 1618 1619 repeat := 1 1620 for s.Scan() { 1621 t.curPath, t.curLineNo = path, s.line+subtest.lineLineIndexIntoFile 1622 if *maxErrs > 0 && t.failures >= *maxErrs { 1623 return errors.Errorf("%s:%d: too many errors encountered, skipping the rest of the input", 1624 path, s.line+subtest.lineLineIndexIntoFile, 1625 ) 1626 } 1627 line := s.Text() 1628 t.emit(line) 1629 fields := strings.Fields(line) 1630 if len(fields) == 0 { 1631 continue 1632 } 1633 cmd := fields[0] 1634 if strings.HasPrefix(cmd, "#") { 1635 // Skip comment lines. 1636 continue 1637 } 1638 if len(fields) == 2 && fields[1] == "error" { 1639 return errors.Errorf("%s:%d: no expected error provided", 1640 path, s.line+subtest.lineLineIndexIntoFile, 1641 ) 1642 } 1643 switch cmd { 1644 case "repeat": 1645 // A line "repeat X" makes the test repeat the following statement or query X times. 1646 var err error 1647 count := 0 1648 if len(fields) != 2 { 1649 err = errors.New("invalid line format") 1650 } else if count, err = strconv.Atoi(fields[1]); err == nil && count < 2 { 1651 err = errors.New("invalid count") 1652 } 1653 if err != nil { 1654 return errors.Errorf("%s:%d invalid repeat line: %s", 1655 path, s.line+subtest.lineLineIndexIntoFile, err, 1656 ) 1657 } 1658 repeat = count 1659 1660 case "sleep": 1661 var err error 1662 var duration time.Duration 1663 // A line "sleep Xs" makes the test sleep for X seconds. 
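// Any duration format accepted by time.ParseDuration works here,
// e.g. "sleep 2s" or "sleep 500ms".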
1664 if len(fields) != 2 { 1665 err = errors.New("invalid line format") 1666 } else if duration, err = time.ParseDuration(fields[1]); err != nil { 1667 err = errors.New("invalid duration") 1668 } 1669 if err != nil { 1670 return errors.Errorf("%s:%d invalid sleep line: %s", 1671 path, s.line+subtest.lineLineIndexIntoFile, err, 1672 ) 1673 } 1674 time.Sleep(duration) 1675 1676 case "statement": 1677 stmt := logicStatement{ 1678 pos: fmt.Sprintf("\n%s:%d", path, s.line+subtest.lineLineIndexIntoFile), 1679 expectCount: -1, 1680 } 1681 // Parse "statement error <regexp>" 1682 if m := errorRE.FindStringSubmatch(s.Text()); m != nil { 1683 stmt.expectErrCode = m[1] 1684 stmt.expectErr = m[2] 1685 } 1686 if len(fields) >= 3 && fields[1] == "count" { 1687 n, err := strconv.ParseInt(fields[2], 10, 64) 1688 if err != nil { 1689 return err 1690 } 1691 stmt.expectCount = n 1692 } 1693 if _, err := stmt.readSQL(t, s, false /* allowSeparator */); err != nil { 1694 return err 1695 } 1696 if !s.skip { 1697 for i := 0; i < repeat; i++ { 1698 if cont, err := t.execStatement(stmt); err != nil { 1699 if !cont { 1700 return err 1701 } 1702 t.Error(err) 1703 } 1704 } 1705 } else { 1706 s.skip = false 1707 } 1708 repeat = 1 1709 t.success(path) 1710 1711 case "query": 1712 var query logicQuery 1713 query.pos = fmt.Sprintf("\n%s:%d", path, s.line+subtest.lineLineIndexIntoFile) 1714 // Parse "query error <regexp>" 1715 if m := errorRE.FindStringSubmatch(s.Text()); m != nil { 1716 query.expectErrCode = m[1] 1717 query.expectErr = m[2] 1718 } else if len(fields) < 2 { 1719 return errors.Errorf("%s: invalid test statement: %s", query.pos, s.Text()) 1720 } else { 1721 // Parse "query <type-string> <options> <label>" 1722 query.colTypes = fields[1] 1723 if *bigtest { 1724 // bigtests put each expected value on its own line. 1725 query.valsPerLine = 1 1726 } else { 1727 // Otherwise, expect number of values to match expected type count. 1728 query.valsPerLine = len(query.colTypes) 1729 } 1730 1731 if len(fields) >= 3 { 1732 query.rawOpts = fields[2] 1733 1734 tokens := strings.Split(query.rawOpts, ",") 1735 1736 // For tokens of the form tok(arg1, arg2, arg3), we want to collapse 1737 // these split tokens into one. 1738 buildArgumentTokens := func(argToken string) { 1739 for i := 0; i < len(tokens)-1; i++ { 1740 if strings.HasPrefix(tokens[i], argToken+"(") && !strings.HasSuffix(tokens[i], ")") { 1741 // Merge this token with the next. 1742 tokens[i] = tokens[i] + "," + tokens[i+1] 1743 // Delete tokens[i+1]. 1744 copy(tokens[i+1:], tokens[i+2:]) 1745 tokens = tokens[:len(tokens)-1] 1746 // Look at the new token again. 
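// For example, the tokens "kvtrace(CPut" and "prefix=/Table/54)" produced
// by the comma split above are merged back into the single token
// "kvtrace(CPut,prefix=/Table/54)".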
1747 i-- 1748 } 1749 } 1750 } 1751 1752 buildArgumentTokens("partialsort") 1753 buildArgumentTokens("kvtrace") 1754 1755 for _, opt := range tokens { 1756 if strings.HasPrefix(opt, "partialsort(") && strings.HasSuffix(opt, ")") { 1757 s := opt 1758 s = strings.TrimPrefix(s, "partialsort(") 1759 s = strings.TrimSuffix(s, ")") 1760 1761 var orderedCols []int 1762 for _, c := range strings.Split(s, ",") { 1763 colIdx, err := strconv.Atoi(c) 1764 if err != nil || colIdx < 1 { 1765 return errors.Errorf("%s: invalid sort mode: %s", query.pos, opt) 1766 } 1767 orderedCols = append(orderedCols, colIdx-1) 1768 } 1769 if len(orderedCols) == 0 { 1770 return errors.Errorf("%s: invalid sort mode: %s", query.pos, opt) 1771 } 1772 query.sorter = func(numCols int, values []string) { 1773 partialSort(numCols, orderedCols, values) 1774 } 1775 continue 1776 } 1777 1778 if strings.HasPrefix(opt, "kvtrace(") && strings.HasSuffix(opt, ")") { 1779 s := opt 1780 s = strings.TrimPrefix(s, "kvtrace(") 1781 s = strings.TrimSuffix(s, ")") 1782 1783 query.kvtrace = true 1784 query.kvOpTypes = nil 1785 query.keyPrefixFilters = nil 1786 for _, c := range strings.Split(s, ",") { 1787 if strings.HasPrefix(c, "prefix=") { 1788 matched := strings.TrimPrefix(c, "prefix=") 1789 query.keyPrefixFilters = append(query.keyPrefixFilters, matched) 1790 } else if isAllowedKVOp(c) { 1791 query.kvOpTypes = append(query.kvOpTypes, c) 1792 } else { 1793 return errors.Errorf( 1794 "invalid filter '%s' provided. Expected one of %v or a prefix of the form 'prefix=x'", 1795 c, 1796 allowedKVOpTypes, 1797 ) 1798 } 1799 } 1800 continue 1801 } 1802 1803 switch opt { 1804 case "nosort": 1805 query.sorter = nil 1806 1807 case "rowsort": 1808 query.sorter = rowSort 1809 1810 case "valuesort": 1811 query.sorter = valueSort 1812 1813 case "colnames": 1814 query.colNames = true 1815 1816 case "retry": 1817 query.retry = true 1818 1819 case "kvtrace": 1820 // kvtrace without any arguments doesn't perform any additional 1821 // filtering of results. So it displays kv's from all tables 1822 // and all operation types. 1823 query.kvtrace = true 1824 query.kvOpTypes = nil 1825 query.keyPrefixFilters = nil 1826 1827 case "noticetrace": 1828 query.noticetrace = true 1829 1830 default: 1831 return errors.Errorf("%s: unknown sort mode: %s", query.pos, opt) 1832 } 1833 } 1834 } 1835 if len(fields) >= 4 { 1836 query.label = fields[3] 1837 } 1838 } 1839 1840 if query.noticetrace && query.kvtrace { 1841 return errors.Errorf( 1842 "%s: cannot have both noticetrace and kvtrace on at the same time", 1843 query.pos, 1844 ) 1845 } 1846 1847 separator, err := query.readSQL(t, s, true /* allowSeparator */) 1848 if err != nil { 1849 return err 1850 } 1851 1852 query.checkResults = true 1853 if separator { 1854 // Query results are either a space separated list of values up to a 1855 // blank line or a line of the form "xx values hashing to yyy". The 1856 // latter format is used by sqllogictest when a large number of results 1857 // match the query. 1858 if s.Scan() { 1859 if m := resultsRE.FindStringSubmatch(s.Text()); m != nil { 1860 var err error 1861 query.expectedValues, err = strconv.Atoi(m[1]) 1862 if err != nil { 1863 return err 1864 } 1865 query.expectedHash = m[2] 1866 query.checkResults = false 1867 } else { 1868 for { 1869 // Normalize each expected row by discarding leading/trailing 1870 // whitespace and by replacing each run of contiguous whitespace 1871 // with a single space. 
1872 query.expectedResultsRaw = append(query.expectedResultsRaw, s.Text()) 1873 results := strings.Fields(s.Text()) 1874 if len(results) == 0 { 1875 break 1876 } 1877 1878 if query.sorter == nil { 1879 // When rows don't need to be sorted, then always compare by 1880 // tokens, regardless of where row/column boundaries are. 1881 query.expectedResults = append(query.expectedResults, results...) 1882 } else { 1883 // It's important to know where row/column boundaries are in 1884 // order to correctly perform sorting. Assume that boundaries 1885 // are delimited by whitespace. This means that values cannot 1886 // contain whitespace when there are multiple columns, since 1887 // that would be interpreted as extra values: 1888 // 1889 // foo bar baz 1890 // 1891 // If there are two expected columns, then it's not possible 1892 // to know whether the expected results are ("foo bar", "baz") 1893 // or ("foo", "bar baz"), so just error in that case. 1894 if query.valsPerLine == 1 { 1895 // Only one expected value per line, so never ambiguous, 1896 // even if there is whitespace in the value. 1897 query.expectedResults = append(query.expectedResults, strings.Join(results, " ")) 1898 } else { 1899 // Don't error if --rewrite is specified, since the expected 1900 // results are ignored in that case. 1901 if !*rewriteResultsInTestfiles && len(results) != len(query.colTypes) { 1902 return errors.Errorf("expected results are invalid: unexpected column count") 1903 } 1904 query.expectedResults = append(query.expectedResults, results...) 1905 } 1906 } 1907 1908 if !s.Scan() { 1909 break 1910 } 1911 } 1912 } 1913 } 1914 } else if query.label != "" { 1915 // Label and no separator; we won't be directly checking results; we 1916 // cross-check results between all queries with the same label. 1917 query.checkResults = false 1918 } 1919 1920 if !s.skip { 1921 if query.kvtrace { 1922 _, err := t.db.Exec("SET TRACING=on,kv") 1923 if err != nil { 1924 return err 1925 } 1926 _, err = t.db.Exec(query.sql) 1927 if err != nil { 1928 t.Error(err) 1929 } 1930 _, err = t.db.Exec("SET TRACING=off") 1931 if err != nil { 1932 return err 1933 } 1934 1935 queryPrefix := `SELECT message FROM [SHOW KV TRACE FOR SESSION] ` 1936 buildQuery := func(ops []string, keyFilters []string) string { 1937 var sb strings.Builder 1938 sb.WriteString(queryPrefix) 1939 if len(keyFilters) == 0 { 1940 keyFilters = []string{""} 1941 } 1942 for i, c := range ops { 1943 for j, f := range keyFilters { 1944 if i+j == 0 { 1945 sb.WriteString("WHERE ") 1946 } else { 1947 sb.WriteString("OR ") 1948 } 1949 sb.WriteString(fmt.Sprintf("message like '%s %s%%'", c, f)) 1950 } 1951 } 1952 return sb.String() 1953 } 1954 1955 query.colTypes = "T" 1956 if len(query.kvOpTypes) == 0 { 1957 query.sql = buildQuery(allowedKVOpTypes, query.keyPrefixFilters) 1958 } else { 1959 query.sql = buildQuery(query.kvOpTypes, query.keyPrefixFilters) 1960 } 1961 } 1962 1963 if query.noticetrace { 1964 query.colTypes = "T" 1965 } 1966 1967 for i := 0; i < repeat; i++ { 1968 if query.retry && !*rewriteResultsInTestfiles { 1969 testutils.SucceedsSoon(t.rootT, func() error { 1970 return t.execQuery(query) 1971 }) 1972 } else { 1973 if query.retry && *rewriteResultsInTestfiles { 1974 // The presence of the retry flag indicates that we expect this 1975 // query may need some time to succeed. If we are rewriting, wait 1976 // 500ms before executing the query. 1977 // TODO(rytaft): We may want to make this sleep time configurable. 
1978 time.Sleep(time.Millisecond * 500) 1979 } 1980 if err := t.execQuery(query); err != nil { 1981 t.Error(err) 1982 } 1983 } 1984 } 1985 } else { 1986 s.skip = false 1987 } 1988 repeat = 1 1989 t.success(path) 1990 1991 case "let": 1992 // let $<name> 1993 // <query> 1994 if len(fields) != 2 { 1995 return errors.Errorf("let command requires one argument, found: %v", fields) 1996 } 1997 varName := fields[1] 1998 if !varRE.MatchString(varName) { 1999 return errors.Errorf("invalid target name for let: %s", varName) 2000 } 2001 2002 stmt := logicStatement{ 2003 pos: fmt.Sprintf("\n%s:%d", path, s.line+subtest.lineLineIndexIntoFile), 2004 } 2005 if _, err := stmt.readSQL(t, s, false /* allowSeparator */); err != nil { 2006 return err 2007 } 2008 rows, err := t.db.Query(stmt.sql) 2009 if err != nil { 2010 return errors.Wrapf(err, "%s: error running query %s", stmt.pos, stmt.sql) 2011 } 2012 if !rows.Next() { 2013 return errors.Errorf("%s: no rows returned by query %s", stmt.pos, stmt.sql) 2014 } 2015 var val string 2016 if err := rows.Scan(&val); err != nil { 2017 return errors.Wrapf(err, "%s: error getting result from query %s", stmt.pos, stmt.sql) 2018 } 2019 if rows.Next() { 2020 return errors.Errorf("%s: more than one row returned by query %s", stmt.pos, stmt.sql) 2021 } 2022 t.t().Logf("let %s = %s\n", varName, val) 2023 t.varMap[varName] = val 2024 2025 case "halt", "hash-threshold": 2026 2027 case "user": 2028 if len(fields) < 2 { 2029 return errors.Errorf("user command requires one argument, found: %v", fields) 2030 } 2031 if len(fields[1]) == 0 { 2032 return errors.Errorf("user command requires a non-blank argument") 2033 } 2034 cleanupUserFunc := t.setUser(fields[1]) 2035 defer cleanupUserFunc() 2036 2037 case "skip": 2038 reason := "skipped" 2039 if len(fields) > 1 { 2040 reason = fields[1] 2041 } 2042 t.t().Skip(reason) 2043 2044 case "skipif": 2045 if len(fields) < 2 { 2046 return errors.Errorf("skipif command requires one argument, found: %v", fields) 2047 } 2048 switch fields[1] { 2049 case "": 2050 return errors.Errorf("skipif command requires a non-blank argument") 2051 case "mysql", "mssql": 2052 case "postgresql", "cockroachdb": 2053 s.skip = true 2054 continue 2055 default: 2056 return errors.Errorf("unimplemented test statement: %s", s.Text()) 2057 } 2058 2059 case "onlyif": 2060 if len(fields) < 2 { 2061 return errors.Errorf("onlyif command requires one argument, found: %v", fields) 2062 } 2063 switch fields[1] { 2064 case "": 2065 return errors.New("onlyif command requires a non-blank argument") 2066 case "cockroachdb": 2067 case "mysql": 2068 s.skip = true 2069 continue 2070 case "mssql": 2071 s.skip = true 2072 continue 2073 default: 2074 return errors.Errorf("unimplemented test statement: %s", s.Text()) 2075 } 2076 2077 case "traceon": 2078 if len(fields) != 2 { 2079 return errors.Errorf("traceon requires a filename argument, found: %v", fields) 2080 } 2081 t.traceStart(fields[1]) 2082 2083 case "traceoff": 2084 if t.traceFile == nil { 2085 return errors.Errorf("no trace active") 2086 } 2087 t.traceStop() 2088 2089 case "kv-batch-size": 2090 // kv-batch-size limits the kvfetcher batch size. It can be used to 2091 // trigger certain error conditions around limited batches. 
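			// Illustrative directive as it would appear in a test file
			// (hypothetical example; the argument must parse as an integer):
			//   kv-batch-size 10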
2092 if len(fields) != 2 { 2093 return errors.Errorf( 2094 "kv-batch-size needs an integer argument, found: %v", 2095 fields[1:], 2096 ) 2097 } 2098 batchSize, err := strconv.Atoi(fields[1]) 2099 if err != nil { 2100 return errors.Errorf("kv-batch-size needs an integer argument; %s", err) 2101 } 2102 t.outf("Setting kv batch size %d", batchSize) 2103 defer row.TestingSetKVBatchSize(int64(batchSize))() 2104 2105 default: 2106 return errors.Errorf("%s:%d: unknown command: %s", 2107 path, s.line+subtest.lineLineIndexIntoFile, cmd, 2108 ) 2109 } 2110 } 2111 return s.Err() 2112 } 2113 2114 // verifyError checks that either no error was found where none was 2115 // expected, or that an error was found when one was expected. 2116 // Returns a nil error to indicate the behavior was as expected. If 2117 // non-nil, returns also true in the boolean flag whether it is safe 2118 // to continue (i.e. an error was expected, an error was obtained, and 2119 // the errors didn't match). 2120 func (t *logicTest) verifyError( 2121 sql, pos, expectErr, expectErrCode string, err error, 2122 ) (bool, error) { 2123 if expectErr == "" && expectErrCode == "" && err != nil { 2124 cont := t.unexpectedError(sql, pos, err) 2125 if cont { 2126 // unexpectedError() already reported via t.Errorf. no need for more. 2127 err = nil 2128 } 2129 return cont, err 2130 } 2131 if !testutils.IsError(err, expectErr) { 2132 if err == nil { 2133 newErr := errors.Errorf("%s: %s\nexpected %q, but no error occurred", pos, sql, expectErr) 2134 return false, newErr 2135 } 2136 2137 errString := pgerror.FullError(err) 2138 newErr := errors.Errorf("%s: %s\nexpected:\n%s\n\ngot:\n%s", pos, sql, expectErr, errString) 2139 if err != nil && strings.Contains(errString, expectErr) { 2140 t.t().Logf("The output string contained the input regexp. Perhaps you meant to write:\n"+ 2141 "query error %s", regexp.QuoteMeta(errString)) 2142 } 2143 // We can't rewrite the error, but we can at least print the regexp. 2144 if *rewriteResultsInTestfiles { 2145 r := regexp.QuoteMeta(errString) 2146 r = strings.Trim(r, "\n ") 2147 r = strings.ReplaceAll(r, "\n", "\\n") 2148 t.t().Logf("Error regexp: %s\n", r) 2149 } 2150 return (err == nil) == (expectErr == ""), newErr 2151 } 2152 if err != nil { 2153 if pqErr := (*pq.Error)(nil); errors.As(err, &pqErr) && 2154 strings.HasPrefix(string(pqErr.Code), "XX" /* internal error, corruption, etc */) && 2155 string(pqErr.Code) != pgcode.Uncategorized /* this is also XX but innocuous */ { 2156 if expectErrCode != string(pqErr.Code) { 2157 return false, errors.Errorf( 2158 "%s: %s: serious error with code %q occurred; if expected, must use 'error pgcode %s ...' 
in test:\n%s", 2159 pos, sql, pqErr.Code, pqErr.Code, pgerror.FullError(err)) 2160 } 2161 } 2162 } 2163 if expectErrCode != "" { 2164 if err != nil { 2165 var pqErr *pq.Error 2166 if !errors.As(err, &pqErr) { 2167 newErr := errors.Errorf("%s %s\n: expected error code %q, but the error we found is not "+ 2168 "a libpq error: %s", pos, sql, expectErrCode, err) 2169 return true, newErr 2170 } 2171 if pqErr.Code != pq.ErrorCode(expectErrCode) { 2172 newErr := errors.Errorf("%s: %s\nexpected error code %q, but found code %q (%s)", 2173 pos, sql, expectErrCode, pqErr.Code, pqErr.Code.Name()) 2174 return true, newErr 2175 } 2176 } else { 2177 newErr := errors.Errorf("%s: %s\nexpected error code %q, but found success", 2178 pos, sql, expectErrCode) 2179 return (err != nil), newErr 2180 } 2181 } 2182 return true, nil 2183 } 2184 2185 // formatErr attempts to provide more details if present. 2186 func formatErr(err error) string { 2187 if pqErr := (*pq.Error)(nil); errors.As(err, &pqErr) { 2188 var buf bytes.Buffer 2189 fmt.Fprintf(&buf, "(%s) %s", pqErr.Code, pqErr.Message) 2190 if pqErr.File != "" || pqErr.Line != "" || pqErr.Routine != "" { 2191 fmt.Fprintf(&buf, "\n%s:%s: in %s()", pqErr.File, pqErr.Line, pqErr.Routine) 2192 } 2193 if pqErr.Detail != "" { 2194 fmt.Fprintf(&buf, "\nDETAIL: %s", pqErr.Detail) 2195 } 2196 if pqErr.Code == pgcode.Internal { 2197 fmt.Fprintln(&buf, "\nNOTE: internal errors may have more details in logs. Use -show-logs.") 2198 } 2199 return buf.String() 2200 } 2201 return err.Error() 2202 } 2203 2204 // unexpectedError handles ignoring queries that fail during prepare 2205 // when -allow-prepare-fail is specified. The argument "sql" is "" to indicate the 2206 // work is done on behalf of a statement, which always fail upon an 2207 // unexpected error. 2208 func (t *logicTest) unexpectedError(sql string, pos string, err error) bool { 2209 if *allowPrepareFail && sql != "" { 2210 // This is a query and -allow-prepare-fail is set. Try to prepare 2211 // the query. If prepare fails, this means we (probably) do not 2212 // support the input syntax, and -allow-prepare-fail instructs us 2213 // to ignore the unexpected error. 2214 stmt, err := t.db.Prepare(sql) 2215 if err != nil { 2216 if *showSQL { 2217 t.outf("\t-- fails prepare: %s", formatErr(err)) 2218 } 2219 t.signalIgnoredError(err, pos, sql) 2220 return true 2221 } 2222 if err := stmt.Close(); err != nil { 2223 t.Errorf("%s: %s\nerror when closing prepared statement: %s", sql, pos, formatErr(err)) 2224 } 2225 } 2226 t.Errorf("%s: %s\nexpected success, but found\n%s", pos, sql, formatErr(err)) 2227 return false 2228 } 2229 2230 func (t *logicTest) execStatement(stmt logicStatement) (bool, error) { 2231 if *showSQL { 2232 t.outf("%s;", stmt.sql) 2233 } 2234 execSQL, changed := mutations.ApplyString(t.rng, stmt.sql, mutations.ColumnFamilyMutator) 2235 if changed { 2236 t.outf("rewrote:\n%s\n", execSQL) 2237 } 2238 res, err := t.db.Exec(execSQL) 2239 if err == nil { 2240 sqlutils.VerifyStatementPrettyRoundtrip(t.t(), stmt.sql) 2241 } 2242 if err == nil && stmt.expectCount >= 0 { 2243 var count int64 2244 count, err = res.RowsAffected() 2245 2246 // If err becomes non-nil here, we'll catch it below. 2247 2248 if err == nil && count != stmt.expectCount { 2249 t.Errorf("%s: %s\nexpected %d rows affected, got %d", stmt.pos, execSQL, stmt.expectCount, count) 2250 } 2251 } 2252 2253 // General policy for failing vs. 
continuing: 2254 // - we want to do as much work as possible; 2255 // - however, a statement that fails when it should succeed or 2256 // a statement that succeeds when it should fail may have left 2257 // the database in an improper state, so we stop there; 2258 // - error on expected error is worth going further, even 2259 // if the obtained error does not match the expected error. 2260 cont, err := t.verifyError("", stmt.pos, stmt.expectErr, stmt.expectErrCode, err) 2261 if err != nil { 2262 t.finishOne("OK") 2263 } 2264 return cont, err 2265 } 2266 2267 func (t *logicTest) hashResults(results []string) (string, error) { 2268 // Hash the values using MD5. This hashing precisely matches the hashing in 2269 // sqllogictest.c. 2270 h := md5.New() 2271 for _, r := range results { 2272 if _, err := h.Write(append([]byte(r), byte('\n'))); err != nil { 2273 return "", err 2274 } 2275 } 2276 return fmt.Sprintf("%x", h.Sum(nil)), nil 2277 } 2278 2279 func (t *logicTest) execQuery(query logicQuery) error { 2280 if *showSQL { 2281 t.outf("%s;", query.sql) 2282 } 2283 2284 t.noticeBuffer = nil 2285 2286 rows, err := t.db.Query(query.sql) 2287 if err == nil { 2288 sqlutils.VerifyStatementPrettyRoundtrip(t.t(), query.sql) 2289 2290 // If expecting an error, then read all result rows, since some errors are 2291 // only triggered after initial rows are returned. 2292 if query.expectErr != "" { 2293 // Break early if error is detected, and be sure to test for error in case 2294 // where Next returns false. 2295 for rows.Next() { 2296 if rows.Err() != nil { 2297 break 2298 } 2299 } 2300 err = rows.Err() 2301 } 2302 } 2303 if _, err := t.verifyError(query.sql, query.pos, query.expectErr, query.expectErrCode, err); err != nil { 2304 return err 2305 } 2306 if err != nil { 2307 // An error occurred, but it was expected. 2308 t.finishOne("XFAIL") 2309 //nolint:returnerrcheck 2310 return nil 2311 } 2312 defer rows.Close() 2313 2314 var actualResultsRaw []string 2315 if query.noticetrace { 2316 // We have to force close the results for the notice handler from lib/pq 2317 // returns results. 2318 if err := rows.Err(); err != nil { 2319 return err 2320 } 2321 rows.Close() 2322 actualResultsRaw = t.noticeBuffer 2323 } else { 2324 cols, err := rows.Columns() 2325 if err != nil { 2326 return err 2327 } 2328 if len(query.colTypes) != len(cols) { 2329 return fmt.Errorf("%s: expected %d columns, but found %d", 2330 query.pos, len(query.colTypes), len(cols)) 2331 } 2332 vals := make([]interface{}, len(cols)) 2333 for i := range vals { 2334 vals[i] = new(interface{}) 2335 } 2336 2337 if query.colNames { 2338 actualResultsRaw = append(actualResultsRaw, cols...) 
2339 } 2340 for rows.Next() { 2341 if err := rows.Scan(vals...); err != nil { 2342 return err 2343 } 2344 for i, v := range vals { 2345 if val := *v.(*interface{}); val != nil { 2346 valT := reflect.TypeOf(val).Kind() 2347 colT := query.colTypes[i] 2348 switch colT { 2349 case 'T': 2350 if valT != reflect.String && valT != reflect.Slice && valT != reflect.Struct { 2351 return fmt.Errorf("%s: expected text value for column %d, but found %T: %#v", 2352 query.pos, i, val, val, 2353 ) 2354 } 2355 case 'I': 2356 if valT != reflect.Int64 { 2357 if *flexTypes && (valT == reflect.Float64 || valT == reflect.Slice) { 2358 t.signalIgnoredError( 2359 fmt.Errorf("result type mismatch: expected I, got %T", val), query.pos, query.sql, 2360 ) 2361 return nil 2362 } 2363 return fmt.Errorf("%s: expected int value for column %d, but found %T: %#v", 2364 query.pos, i, val, val, 2365 ) 2366 } 2367 case 'R': 2368 if valT != reflect.Float64 && valT != reflect.Slice { 2369 if *flexTypes && (valT == reflect.Int64) { 2370 t.signalIgnoredError( 2371 fmt.Errorf("result type mismatch: expected R, got %T", val), query.pos, query.sql, 2372 ) 2373 return nil 2374 } 2375 return fmt.Errorf("%s: expected float/decimal value for column %d, but found %T: %#v", 2376 query.pos, i, val, val, 2377 ) 2378 } 2379 case 'B': 2380 if valT != reflect.Bool { 2381 return fmt.Errorf("%s: expected boolean value for column %d, but found %T: %#v", 2382 query.pos, i, val, val, 2383 ) 2384 } 2385 case 'O': 2386 if valT != reflect.Slice { 2387 return fmt.Errorf("%s: expected oid value for column %d, but found %T: %#v", 2388 query.pos, i, val, val, 2389 ) 2390 } 2391 default: 2392 return fmt.Errorf("%s: unknown type in type string: %c in %s", 2393 query.pos, colT, query.colTypes, 2394 ) 2395 } 2396 2397 if byteArray, ok := val.([]byte); ok { 2398 // The postgres wire protocol does not distinguish between 2399 // strings and byte arrays, but our tests do. In order to do 2400 // The Right Thing™, we replace byte arrays which are valid 2401 // UTF-8 with strings. This allows byte arrays which are not 2402 // valid UTF-8 to print as a list of bytes (e.g. `[124 107]`) 2403 // while printing valid strings naturally. 2404 if str := string(byteArray); utf8.ValidString(str) { 2405 val = str 2406 } 2407 } 2408 // Empty strings are rendered as "·" (middle dot) 2409 if val == "" { 2410 val = "·" 2411 } 2412 actualResultsRaw = append(actualResultsRaw, fmt.Sprint(val)) 2413 } else { 2414 actualResultsRaw = append(actualResultsRaw, "NULL") 2415 } 2416 } 2417 } 2418 if err := rows.Err(); err != nil { 2419 return err 2420 } 2421 } 2422 2423 // Normalize each row in the result by mapping each run of contiguous 2424 // whitespace to a single space. 2425 var actualResults []string 2426 if actualResultsRaw != nil { 2427 actualResults = make([]string, 0, len(actualResultsRaw)) 2428 for _, result := range actualResultsRaw { 2429 if query.sorter == nil || query.valsPerLine != 1 { 2430 actualResults = append(actualResults, strings.Fields(result)...) 
2431 } else { 2432 actualResults = append(actualResults, strings.Join(strings.Fields(result), " ")) 2433 } 2434 } 2435 } 2436 2437 if query.sorter != nil { 2438 query.sorter(len(query.colTypes), actualResults) 2439 query.sorter(len(query.colTypes), query.expectedResults) 2440 } 2441 2442 hash, err := t.hashResults(actualResults) 2443 if err != nil { 2444 return err 2445 } 2446 2447 if query.expectedHash != "" { 2448 n := len(actualResults) 2449 if query.expectedValues != n { 2450 return fmt.Errorf("%s: expected %d results, but found %d", query.pos, query.expectedValues, n) 2451 } 2452 if query.expectedHash != hash { 2453 return fmt.Errorf("%s: expected %s, but found %s", query.pos, query.expectedHash, hash) 2454 } 2455 } 2456 2457 if *rewriteResultsInTestfiles || *rewriteSQL { 2458 if query.expectedHash != "" { 2459 if query.expectedValues == 1 { 2460 t.emit(fmt.Sprintf("1 value hashing to %s", query.expectedHash)) 2461 } else { 2462 t.emit(fmt.Sprintf("%d values hashing to %s", query.expectedValues, query.expectedHash)) 2463 } 2464 } 2465 2466 if query.checkResults { 2467 // If the results match or we're not rewriting, emit them the way they were originally 2468 // formatted/ordered in the testfile. Otherwise, emit the actual results. 2469 if !*rewriteResultsInTestfiles || reflect.DeepEqual(query.expectedResults, actualResults) { 2470 for _, l := range query.expectedResultsRaw { 2471 t.emit(l) 2472 } 2473 } else { 2474 // Emit the actual results. 2475 for _, line := range t.formatValues(actualResultsRaw, query.valsPerLine) { 2476 t.emit(line) 2477 } 2478 } 2479 } 2480 return nil 2481 } 2482 2483 if query.checkResults && !reflect.DeepEqual(query.expectedResults, actualResults) { 2484 var buf bytes.Buffer 2485 fmt.Fprintf(&buf, "%s: %s\nexpected:\n", query.pos, query.sql) 2486 for _, line := range query.expectedResultsRaw { 2487 fmt.Fprintf(&buf, " %s\n", line) 2488 } 2489 sortMsg := "" 2490 if query.sorter != nil { 2491 // We performed an order-insensitive comparison of "actual" vs "expected" 2492 // rows by sorting both, but we'll display the error with the expected 2493 // rows in the order in which they were put in the file, and the actual 2494 // rows in the order in which the query returned them. 2495 sortMsg = " -> ignore the following ordering of rows" 2496 } 2497 fmt.Fprintf(&buf, "but found (query options: %q%s) :\n", query.rawOpts, sortMsg) 2498 for _, line := range t.formatValues(actualResultsRaw, query.valsPerLine) { 2499 fmt.Fprintf(&buf, " %s\n", line) 2500 } 2501 return errors.Newf("%s", buf.String()) 2502 } 2503 2504 if query.label != "" { 2505 if prevHash, ok := t.labelMap[query.label]; ok && prevHash != hash { 2506 t.Errorf( 2507 "%s: error in input: previous values for label %s (hash %s) do not match (hash %s)", 2508 query.pos, query.label, prevHash, hash, 2509 ) 2510 } 2511 t.labelMap[query.label] = hash 2512 } 2513 2514 t.finishOne("OK") 2515 return nil 2516 } 2517 2518 func (t *logicTest) formatValues(vals []string, valsPerLine int) []string { 2519 var buf bytes.Buffer 2520 tw := tabwriter.NewWriter(&buf, 2, 1, 2, ' ', 0) 2521 2522 for line := 0; line < len(vals)/valsPerLine; line++ { 2523 for i := 0; i < valsPerLine; i++ { 2524 fmt.Fprintf(tw, "%s\t", vals[line*valsPerLine+i]) 2525 } 2526 fmt.Fprint(tw, "\n") 2527 } 2528 _ = tw.Flush() 2529 2530 // Split into lines and trim any trailing whitespace. 2531 // Note that the last line will be empty (which is what we want). 
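	// Illustrative example (hypothetical values): with valsPerLine = 2, the
	// input []string{"1", "foo", "2", "barbaz"} comes back as two
	// column-aligned lines, "1  foo" and "2  barbaz", once the trailing
	// whitespace is trimmed below.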
2532 results := make([]string, 0, len(vals)/valsPerLine) 2533 for _, s := range strings.Split(buf.String(), "\n") { 2534 results = append(results, strings.TrimRight(s, " ")) 2535 } 2536 return results 2537 } 2538 2539 func (t *logicTest) success(file string) { 2540 t.progress++ 2541 now := timeutil.Now() 2542 if now.Sub(t.lastProgress) >= 2*time.Second { 2543 t.lastProgress = now 2544 t.outf("--- progress: %s: %d statements/queries", file, t.progress) 2545 } 2546 } 2547 2548 func (t *logicTest) runFile(path string, config testClusterConfig) { 2549 defer t.close() 2550 2551 defer func() { 2552 if r := recover(); r != nil { 2553 // Translate panics during the test to test errors. 2554 t.Fatalf("panic: %v\n%s", r, string(debug.Stack())) 2555 } 2556 }() 2557 2558 if err := t.processTestFile(path, config); err != nil { 2559 t.Fatal(err) 2560 } 2561 } 2562 2563 var skipLogicTests = envutil.EnvOrDefaultBool("COCKROACH_LOGIC_TESTS_SKIP", false) 2564 var logicTestsConfigExclude = envutil.EnvOrDefaultString("COCKROACH_LOGIC_TESTS_SKIP_CONFIG", "") 2565 var logicTestsConfigFilter = envutil.EnvOrDefaultString("COCKROACH_LOGIC_TESTS_CONFIG", "") 2566 2567 // TestServerArgs contains the parameters that callers of RunLogicTest might 2568 // want to specify for the test clusters to be created with. 2569 type TestServerArgs struct { 2570 // tempStorageDiskLimit determines the limit for the temp storage (that is 2571 // actually in-memory). If it is unset, then the default limit of 100MB 2572 // will be used. 2573 tempStorageDiskLimit int64 2574 } 2575 2576 // RunLogicTest is the main entry point for the logic test. The globs parameter 2577 // specifies the default sets of files to run. 2578 func RunLogicTest(t *testing.T, serverArgs TestServerArgs, globs ...string) { 2579 // Note: there is special code in teamcity-trigger/main.go to run this package 2580 // with less concurrency in the nightly stress runs. If you see problems 2581 // please make adjustments there. 2582 // As of 6/4/2019, the logic tests never complete under race. 2583 if testutils.NightlyStress() && util.RaceEnabled { 2584 t.Skip("logic tests and race detector don't mix: #37993") 2585 } 2586 2587 if skipLogicTests { 2588 t.Skip("COCKROACH_LOGIC_TESTS_SKIP") 2589 } 2590 2591 // Override default glob sets if -d flag was specified. 2592 if *logictestdata != "" { 2593 globs = []string{*logictestdata} 2594 } 2595 2596 // Set time.Local to time.UTC to circumvent pq's timetz parsing flaw. 2597 time.Local = time.UTC 2598 2599 // A new cluster is set up for each separate file in the test. 2600 var paths []string 2601 for _, g := range globs { 2602 match, err := filepath.Glob(g) 2603 if err != nil { 2604 t.Fatal(err) 2605 } 2606 paths = append(paths, match...) 2607 } 2608 2609 if len(paths) == 0 { 2610 t.Fatalf("No testfiles found (globs: %v)", globs) 2611 } 2612 2613 // mu protects the following vars, which all get updated from within the 2614 // possibly parallel subtests. 2615 var progress = struct { 2616 syncutil.Mutex 2617 total, totalFail, totalUnsupported int 2618 lastProgress time.Time 2619 }{ 2620 lastProgress: timeutil.Now(), 2621 } 2622 2623 // Read the configuration directives from all the files and accumulate a list 2624 // of paths per config. 
	configPaths := make([][]string, len(logicTestConfigs))
	var configs []logicTestConfigIdx
	if *overrideConfig != "" {
		configs = parseTestConfig(strings.Split(*overrideConfig, ","))
	}

	for _, path := range paths {
		if *overrideConfig == "" {
			configs = readTestFileConfigs(t, path)
		}
		for _, idx := range configs {
			configPaths[idx] = append(configPaths[idx], path)
		}
	}

	// Determine whether or not to randomize the vectorized batch size.
	rng, _ := randutil.NewPseudoRand()
	randVal := rng.Float64()
	randomizedVectorizedBatchSize := coldata.BatchSize()
	if randVal < 0.25 {
		randomizedVectorizedBatchSize = 1
	} else if randVal < 0.375 {
		randomizedVectorizedBatchSize = 2
	} else if randVal < 0.5 {
		randomizedVectorizedBatchSize = 3
	}
	if randomizedVectorizedBatchSize != coldata.BatchSize() {
		t.Log(fmt.Sprintf("randomize batchSize to %d", randomizedVectorizedBatchSize))
	}

	// The tests below are likely to run concurrently; `log` is shared
	// between all the goroutines and thus all tests, so it doesn't make
	// sense to try to use separate `log.Scope` instances for each test.
	logScope := log.Scope(t)
	defer logScope.Close(t)

	verbose := testing.Verbose() || log.V(1)
	for idx, cfg := range logicTestConfigs {
		paths := configPaths[idx]
		if len(paths) == 0 {
			continue
		}
		// Top-level test: one per test configuration.
		t.Run(cfg.name, func(t *testing.T) {
			if testing.Short() && cfg.skipShort {
				t.Skip("config skipped by -test.short")
			}
			if logicTestsConfigExclude != "" && cfg.name == logicTestsConfigExclude {
				t.Skip("config excluded via env var")
			}
			if logicTestsConfigFilter != "" && cfg.name != logicTestsConfigFilter {
				t.Skip("config does not match env var")
			}
			for _, path := range paths {
				path := path // Rebind range variable.
				// Inner test: one per file path.
				t.Run(filepath.Base(path), func(t *testing.T) {
					// Run the test in parallel, unless:
					//  - we're printing out all of the SQL interactions, or
					//  - we're generating testfiles, or
					//  - we are in race mode (where we can hit a limit on alive
					//    goroutines).
					if !*showSQL && !*rewriteResultsInTestfiles && !*rewriteSQL && !util.RaceEnabled {
						// Skip parallelizing tests that use the kv-batch-size directive since
						// the batch size is a global variable.
						// TODO(jordan, radu): make sqlbase.kvBatchSize non-global to fix this.
						if filepath.Base(path) != "select_index_span_ranges" {
							t.Parallel() // SAFE FOR TESTING (this comment satisfies the linter)
						}
					}
					rng, _ := randutil.NewPseudoRand()
					lt := logicTest{
						rootT:                         t,
						verbose:                       verbose,
						perErrorSummary:               make(map[string][]string),
						rng:                           rng,
						randomizedVectorizedBatchSize: randomizedVectorizedBatchSize,
					}
					if *printErrorSummary {
						defer lt.printErrorSummary()
					}
					lt.setup(cfg, serverArgs)
					lt.runFile(path, cfg)

					progress.Lock()
					defer progress.Unlock()
					progress.total += lt.progress
					progress.totalFail += lt.failures
					progress.totalUnsupported += lt.unsupported
					now := timeutil.Now()
					if now.Sub(progress.lastProgress) >= 2*time.Second {
						progress.lastProgress = now
						lt.outf("--- total progress: %d statements/queries", progress.total)
					}
				})
			}
		})
	}

	unsupportedMsg := ""
	if progress.totalUnsupported > 0 {
		unsupportedMsg = fmt.Sprintf(", ignored %d unsupported queries", progress.totalUnsupported)
	}

	if verbose {
		fmt.Printf("--- total: %d tests, %d failures%s\n",
			progress.total, progress.totalFail, unsupportedMsg,
		)
	}
}

type errorSummaryEntry struct {
	errmsg string
	sql    []string
}
type errorSummary []errorSummaryEntry

func (e errorSummary) Len() int { return len(e) }
func (e errorSummary) Less(i, j int) bool {
	if len(e[i].sql) == len(e[j].sql) {
		return e[i].errmsg < e[j].errmsg
	}
	return len(e[i].sql) < len(e[j].sql)
}
func (e errorSummary) Swap(i, j int) {
	t := e[i]
	e[i] = e[j]
	e[j] = t
}

// printErrorSummary shows the final per-error list of failing queries when
// -allow-prepare-fail or -flex-types are specified.
func (t *logicTest) printErrorSummary() {
	if t.unsupported == 0 {
		return
	}

	t.outf("--- summary of ignored errors:")
	summary := make(errorSummary, len(t.perErrorSummary))
	i := 0
	for errmsg, sql := range t.perErrorSummary {
		summary[i] = errorSummaryEntry{errmsg: errmsg, sql: sql}
		i++
	}
	sort.Sort(summary)
	for _, s := range summary {
		t.outf("%s (%d entries)", s.errmsg, len(s.sql))
		var buf bytes.Buffer
		for _, q := range s.sql {
			buf.WriteByte('\t')
			buf.WriteString(strings.Replace(q, "\n", "\n\t", -1))
		}
		t.outf("%s", buf.String())
	}
}

// shortenString cuts its argument on the right so that it more likely
// fits onto the developer's screen. The behavior can be disabled by
// the command-line flag "-full-messages".
func shortenString(msg string) string {
	if *fullMessages {
		return msg
	}

	shortened := false

	nlPos := strings.IndexRune(msg, '\n')
	if nlPos >= 0 {
		shortened = true
		msg = msg[:nlPos]
	}

	if len(msg) > 80 {
		shortened = true
		msg = msg[:80]
	}

	if shortened {
		msg = msg + "..."
	}

	return msg
}

// simplifyError condenses long error strings to the shortest form
// that still explains the origin of the error.
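// For example (assuming -full-messages is not set), the message
// "w: x: y: z" is condensed to the pair ("w: y: ...", "z").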
func simplifyError(msg string) (string, string) {
	prefix := strings.Split(msg, ": ")

	// Split: "a: b: c" -> "a: b", "c"
	expected := ""
	if len(prefix) > 1 {
		expected = prefix[len(prefix)-1]
		prefix = prefix[:len(prefix)-1]
	}

	// Simplify: "a: b: c: d" -> "a: d"
	if !*fullMessages && len(prefix) > 2 {
		prefix[1] = prefix[len(prefix)-1]
		prefix = prefix[:2]
	}

	// Mark the error message as shortened if necessary.
	if expected != "" {
		prefix = append(prefix, "...")
	}

	return strings.Join(prefix, ": "), expected
}

// signalIgnoredError registers a failing but ignored query.
func (t *logicTest) signalIgnoredError(err error, pos string, sql string) {
	t.unsupported++

	if !*printErrorSummary {
		return
	}

	// Save the error for later reporting.
	errmsg, expected := simplifyError(fmt.Sprintf("%s", err))
	var buf bytes.Buffer
	fmt.Fprintf(&buf, "-- %s (%s)\n%s", pos, shortenString(expected), shortenString(sql+";"))
	errmsg = shortenString(errmsg)
	t.perErrorSummary[errmsg] = append(t.perErrorSummary[errmsg], buf.String())
}

// Error is a wrapper around testing.T.Error that handles printing the per-query
// "FAIL" marker when -show-sql is set. It also registers the error to the
// failure counter.
func (t *logicTest) Error(args ...interface{}) {
	t.t().Helper()
	if *showSQL {
		t.outf("\t-- FAIL")
	}
	log.Errorf(context.Background(), "\n%s", fmt.Sprint(args...))
	t.t().Error("\n", fmt.Sprint(args...))
	t.failures++
}

// Errorf is a wrapper around testing.T.Errorf that handles printing the
// per-query "FAIL" marker when -show-sql is set. It also registers the error to
// the failure counter.
func (t *logicTest) Errorf(format string, args ...interface{}) {
	t.t().Helper()
	if *showSQL {
		t.outf("\t-- FAIL")
	}
	log.Errorf(context.Background(), format, args...)
	t.t().Errorf("\n"+format, args...)
	t.failures++
}

// Fatal is a wrapper around testing.T.Fatal that ensures the fatal error message
// is printed on its own line when -show-sql is set.
func (t *logicTest) Fatal(args ...interface{}) {
	t.t().Helper()
	if *showSQL {
		fmt.Println()
	}
	log.Errorf(context.Background(), "%s", fmt.Sprint(args...))
	t.t().Logf("\n%s:%d: error while processing", t.curPath, t.curLineNo)
	t.t().Fatal(args...)
}

// Fatalf is a wrapper around testing.T.Fatalf that ensures the fatal error
// message is printed on its own line when -show-sql is set.
func (t *logicTest) Fatalf(format string, args ...interface{}) {
	if *showSQL {
		fmt.Println()
	}
	log.Errorf(context.Background(), format, args...)
	t.t().Logf("\n%s:%d: error while processing", t.curPath, t.curLineNo)
	t.t().Fatalf(format, args...)
}

// finishOne marks completion of a single test. It handles
// printing the success marker when -show-sql is set.
func (t *logicTest) finishOne(msg string) {
	if *showSQL {
		t.outf("\t-- %s;", msg)
	}
}

// printCompletion reports on the completion of all tests in a given
// input file.
func (t *logicTest) printCompletion(path string, config testClusterConfig) {
	unsupportedMsg := ""
	if t.unsupported > 0 {
		unsupportedMsg = fmt.Sprintf(", ignored %d unsupported queries", t.unsupported)
	}
	t.outf("--- done: %s with config %s: %d tests, %d failures%s", path, config.name,
		t.progress, t.failures, unsupportedMsg)
}
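// Illustrative caller sketch (hypothetical; not part of this file): a Go test
// in this package can drive the logic tests by pointing RunLogicTest at a glob
// of testdata files, for example:
//
//	func TestLogic(t *testing.T) {
//		RunLogicTest(t, TestServerArgs{}, "testdata/logic_test/[^.]*")
//	}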