github.com/cockroachdb/cockroach@v20.2.0-alpha.1+incompatible/pkg/ccl/partitionccl/partition_test.go

// Copyright 2017 The Cockroach Authors.
//
// Licensed as a CockroachDB Enterprise file under the Cockroach Community
// License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
//     https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt

package partitionccl

import (
	"bytes"
	"context"
	gosql "database/sql"
	"fmt"
	"math/rand"
	"strings"
	"testing"
	"time"

	"github.com/cockroachdb/cockroach/pkg/base"
	"github.com/cockroachdb/cockroach/pkg/ccl/importccl"
	"github.com/cockroachdb/cockroach/pkg/ccl/utilccl"
	"github.com/cockroachdb/cockroach/pkg/config"
	"github.com/cockroachdb/cockroach/pkg/config/zonepb"
	"github.com/cockroachdb/cockroach/pkg/keys"
	"github.com/cockroachdb/cockroach/pkg/kv/kvserver"
	"github.com/cockroachdb/cockroach/pkg/roachpb"
	"github.com/cockroachdb/cockroach/pkg/server"
	"github.com/cockroachdb/cockroach/pkg/settings/cluster"
	"github.com/cockroachdb/cockroach/pkg/sql"
	"github.com/cockroachdb/cockroach/pkg/sql/parser"
	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
	"github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
	"github.com/cockroachdb/cockroach/pkg/sql/tests"
	"github.com/cockroachdb/cockroach/pkg/sql/types"
	"github.com/cockroachdb/cockroach/pkg/testutils"
	"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
	"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
	"github.com/cockroachdb/cockroach/pkg/testutils/testcluster"
	"github.com/cockroachdb/cockroach/pkg/util"
	"github.com/cockroachdb/cockroach/pkg/util/hlc"
	"github.com/cockroachdb/cockroach/pkg/util/leaktest"
	"github.com/cockroachdb/cockroach/pkg/util/log"
	"github.com/cockroachdb/cockroach/pkg/util/randutil"
	"github.com/cockroachdb/errors"
	"github.com/gogo/protobuf/proto"
	yaml "gopkg.in/yaml.v2"
)

// partitioningTest represents a single test case used in the various
// partitioning-related tests.
type partitioningTest struct {
	// name is a name for the test, suitable for use as the subtest name.
	name string

	// schema is a full CREATE TABLE statement with a literal `%s` where the
	// table name should be.
	schema string

	// configs are each a shorthand for a zone config, formatted as
	// `@index_name` or `.partition_name`. Optionally a suffix of a colon and a
	// comma-separated list of constraints may be included (`@index_name:+dc1`).
	// These will be parsed into `parsed.subzones`.
	configs []string

	// generatedSpans is 1:1 to the output of GenerateSubzoneSpans, each
	// formatted as `{subzone} {start}-{end}` (e.g. `@primary /1-/2`), where
	// {subzone} is formatted identically to the test shorthand above, and
	// {start} and {end} are formatted using our key pretty printer, but with
	// the table removed. The end key is always specified in here (though
	// GenerateSubzoneSpans omits it under certain conditions to save space).
	generatedSpans []string

	// scans are each a shorthand for an assertion of where data should live.
	// The map key is used for the `WHERE` clause of a `SELECT *` and the
	// value is a comma-separated whitelist of nodes that are allowed to serve
	// this query. Example: `map[string]string{`b = 1`: `n2`}` means that
	// `SELECT * FROM t WHERE b = 1` is required to be served entirely by node2.
	//
	// TODO(dan): These should be based on replication zone attributes instead
	// of node IDs.
	scans map[string]string

	// The following are all filled in by `parse()`.
	parsed struct {
		parsed bool

		// tableName is `name` but escaped for use in SQL.
		tableName string

		// createStmt is `schema` with a table name of `tableName`.
		createStmt string

		// tableDesc is the TableDescriptor created by `createStmt`.
		tableDesc *sqlbase.TableDescriptor

		// zoneConfigStmts contains SQL that effects the zone configs described
		// by `configs`.
		zoneConfigStmts string

		// subzones are the `configs` shorthand parsed into Subzones.
		subzones []zonepb.Subzone
	}
}

type repartitioningTest struct {
	index    string
	old, new partitioningTest
}

// parse fills in the various fields of `partitioningTest.parsed`.
func (pt *partitioningTest) parse() error {
	if pt.parsed.parsed {
		return nil
	}

	pt.parsed.tableName = tree.NameStringP(&pt.name)
	pt.parsed.createStmt = fmt.Sprintf(pt.schema, pt.parsed.tableName)

	{
		ctx := context.Background()
		stmt, err := parser.ParseOne(pt.parsed.createStmt)
		if err != nil {
			return errors.Wrapf(err, `parsing %s`, pt.parsed.createStmt)
		}
		createTable, ok := stmt.AST.(*tree.CreateTable)
		if !ok {
			return errors.Errorf("expected *tree.CreateTable got %T", stmt)
		}
		st := cluster.MakeTestingClusterSettings()
		const parentID, tableID = keys.MinUserDescID, keys.MinUserDescID + 1
		mutDesc, err := importccl.MakeSimpleTableDescriptor(
			ctx, st, createTable, parentID, tableID, importccl.NoFKs, hlc.UnixNano())
		if err != nil {
			return err
		}
		pt.parsed.tableDesc = mutDesc.TableDesc()
		if err := pt.parsed.tableDesc.ValidateTable(); err != nil {
			return err
		}
	}

	var zoneConfigStmts bytes.Buffer
	// TODO(dan): Can we run all the zoneConfigStmts in a txn?
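	// For illustration, using shorthands that appear in the test data below
	// (`<table>` stands for the escaped table name): `.p3:+n2` expands to
	// `ALTER PARTITION p3 OF INDEX <table>@primary CONFIGURE ZONE USING
	// constraints = '[+n2]';` plus a zonepb.Subzone for that partition, while
	// `@idx1:+n3` expands to an ALTER INDEX statement that targets the index's
	// zone config directly.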
	for _, c := range pt.configs {
		var subzoneShort, constraints string
		configParts := strings.Split(c, `:`)
		switch len(configParts) {
		case 1:
			subzoneShort = configParts[0]
		case 2:
			subzoneShort, constraints = configParts[0], configParts[1]
		default:
			panic(errors.Errorf("unsupported config: %s", c))
		}

		var indexName string
		var subzone zonepb.Subzone
		subzoneParts := strings.Split(subzoneShort, ".")
		switch len(subzoneParts) {
		case 1:
			indexName = subzoneParts[0]
		case 2:
			if subzoneParts[0] == "" {
				indexName = "@primary"
			} else {
				indexName = subzoneParts[0]
			}
			subzone.PartitionName = subzoneParts[1]
		default:
			panic(errors.Errorf("unsupported config: %s", c))
		}
		if !strings.HasPrefix(indexName, "@") {
			panic(errors.Errorf("unsupported config: %s", c))
		}
		idxDesc, _, err := pt.parsed.tableDesc.FindIndexByName(indexName[1:])
		if err != nil {
			return errors.Wrapf(err, "could not find index %s", indexName)
		}
		subzone.IndexID = uint32(idxDesc.ID)
		if len(constraints) > 0 {
			if subzone.PartitionName == "" {
				fmt.Fprintf(&zoneConfigStmts,
					`ALTER INDEX %s@%s CONFIGURE ZONE USING constraints = '[%s]';`,
					pt.parsed.tableName, idxDesc.Name, constraints,
				)
			} else {
				fmt.Fprintf(&zoneConfigStmts,
					`ALTER PARTITION %s OF INDEX %s@%s CONFIGURE ZONE USING constraints = '[%s]';`,
					subzone.PartitionName, pt.parsed.tableName, idxDesc.Name, constraints,
				)
			}
		}

		var parsedConstraints zonepb.ConstraintsList
		if err := yaml.UnmarshalStrict([]byte("["+constraints+"]"), &parsedConstraints); err != nil {
			return errors.Wrapf(err, "parsing constraints: %s", constraints)
		}
		subzone.Config.Constraints = parsedConstraints.Constraints
		subzone.Config.InheritedConstraints = parsedConstraints.Inherited

		pt.parsed.subzones = append(pt.parsed.subzones, subzone)
	}
	pt.parsed.zoneConfigStmts = zoneConfigStmts.String()
	pt.parsed.parsed = true

	return nil
}

// verifyScansFn returns a closure that runs the test's `scans` and returns a
// descriptive error if any of them fail. It is not required for `parse` to have
// been called.
func (pt *partitioningTest) verifyScansFn(
	ctx context.Context, t *testing.T, db *gosql.DB,
) func() error {
	return func() error {
		for where, expectedNodes := range pt.scans {
			query := fmt.Sprintf(`SELECT count(*) FROM %s WHERE %s`, tree.NameStringP(&pt.name), where)
			log.Infof(ctx, "query: %s", query)
			if err := verifyScansOnNode(ctx, t, db, query, expectedNodes); err != nil {
				if log.V(1) {
					log.Errorf(ctx, "scan verification failed: %s", err)
				}
				return err
			}
		}
		return nil
	}
}

// allPartitioningTests returns the standard set of `partitioningTest`s used in
// the various partitioning tests. Most of them are curated, but the ones that
// make sure each column type is tested are randomized.
//
// TODO(dan): It already seems odd to only have one of these sets. The
// repartitioning tests only use a subset and a few entries are only present
// because they're interesting for the before/after of a partitioning change.
// Revisit.
func allPartitioningTests(rng *rand.Rand) []partitioningTest {
	tests := []partitioningTest{
		{
			name:   `unpartitioned`,
			schema: `CREATE TABLE %s (a INT PRIMARY KEY)`,
		},

		{
			name:           `all indexes`,
			schema:         `CREATE TABLE %s (a INT PRIMARY KEY, b INT, c INT, INDEX idx1 (b), INDEX idx2 (c))`,
			configs:        []string{`@primary`, `@idx1:+n2`, `@idx2:+n3`},
			generatedSpans: []string{`@primary /1-/2`, `@idx1 /2-/3`, `@idx2 /3-/4`},
			scans:          map[string]string{`b = 1`: `n2`, `c = 1`: `n3`},
		},
		{
			name:           `all indexes - shuffled`,
			schema:         `CREATE TABLE %s (a INT PRIMARY KEY, b INT, c INT, INDEX idx1 (b), INDEX idx2 (c))`,
			configs:        []string{`@idx2:+n2`, `@primary`, `@idx1:+n3`},
			generatedSpans: []string{`@primary /1-/2`, `@idx1 /2-/3`, `@idx2 /3-/4`},
			scans:          map[string]string{`b = 1`: `n3`, `c = 1`: `n2`},
		},
		{
			name:           `some indexes`,
			schema:         `CREATE TABLE %s (a INT PRIMARY KEY, b INT, c INT, INDEX idx1 (b), INDEX idx2 (c))`,
			configs:        []string{`@primary`, `@idx2:+n2`},
			generatedSpans: []string{`@primary /1-/2`, `@idx2 /3-/4`},
			scans:          map[string]string{`c = 1`: `n2`},
		},

		{
			name: `single col list partitioning`,
			schema: `CREATE TABLE %s (a INT PRIMARY KEY) PARTITION BY LIST (a) (
				PARTITION p3 VALUES IN (3),
				PARTITION p4 VALUES IN (4)
			)`,
			configs: []string{`@primary:+n1`, `.p3:+n2`, `.p4:+n3`},
			generatedSpans: []string{
				`@primary /1-/1/3`,
				` .p3 /1/3-/1/4`,
				` .p4 /1/4-/1/5`,
				`@primary /1/5-/2`,
			},
			scans: map[string]string{`a < 3`: `n1`, `a = 3`: `n2`, `a = 4`: `n3`, `a > 4`: `n1`},
		},
		{
			// Intentionally a little different than `single col list
			// partitioning` for the repartitioning tests.
			name: `single col list partitioning - DEFAULT`,
			schema: `CREATE TABLE %s (a INT PRIMARY KEY) PARTITION BY LIST (a) (
				PARTITION p4 VALUES IN (4),
				PARTITION p5 VALUES IN (5),
				PARTITION pd VALUES IN (DEFAULT)
			)`,
			configs: []string{`@primary`, `.p4:+n2`, `.p5:+n3`, `.pd:+n1`},
			generatedSpans: []string{
				`.pd /1-/1/4`,
				`.p4 /1/4-/1/5`,
				`.p5 /1/5-/1/6`,
				`.pd /1/6-/2`,
			},
			scans: map[string]string{`a < 4`: `n1`, `a = 4`: `n2`, `a = 5`: `n3`, `a > 5`: `n1`},
		},
		{
			name: `multi col list partitioning`,
			schema: `CREATE TABLE %s (a INT, b INT, PRIMARY KEY (a, b)) PARTITION BY LIST (a, b) (
				PARTITION p34 VALUES IN ((3, 4)),
				PARTITION p56 VALUES IN ((5, 6)),
				PARTITION p57 VALUES IN ((5, 7))
			)`,
			configs: []string{`@primary:+n1`, `.p34:+n2`, `.p56:+n3`, `.p57:+n1`},
			generatedSpans: []string{
				`@primary /1-/1/3/4`,
				` .p34 /1/3/4-/1/3/5`,
				`@primary /1/3/5-/1/5/6`,
				` .p56 /1/5/6-/1/5/7`,
				` .p57 /1/5/7-/1/5/8`,
				`@primary /1/5/8-/2`,
			},
			scans: map[string]string{
				`(a, b) < (3, 4)`: `n1`,
				`a = 3 AND b = 4`: `n2`,
				`(a, b) > (3, 4) AND (a, b) < (5, 6)`: `n1`,
				`a = 5 AND b = 6`: `n3`,
				`a = 5 AND b = 7`: `n1`,
				`(a, b) > (5, 7)`: `n1`,
			},
		},
		{
			// Intentionally a little different than `multi col list
			// partitioning` for the repartitioning tests.
			name: `multi col list partitioning - DEFAULT`,
			schema: `CREATE TABLE %s (a INT, b INT, PRIMARY KEY (a, b)) PARTITION BY LIST (a, b) (
				PARTITION p34 VALUES IN ((3, 4)),
				PARTITION p57 VALUES IN ((5, 7)),
				PARTITION p58 VALUES IN ((5, 8)),
				PARTITION p5d VALUES IN ((5, DEFAULT))
			)`,
			configs: []string{`@primary:+n1`, `.p34:+n2`, `.p57:+n3`, `.p58:+n1`, `.p5d:+n2`},
			generatedSpans: []string{
				`@primary /1-/1/3/4`,
				` .p34 /1/3/4-/1/3/5`,
				`@primary /1/3/5-/1/5`,
				` .p5d /1/5-/1/5/7`,
				` .p57 /1/5/7-/1/5/8`,
				` .p58 /1/5/8-/1/5/9`,
				` .p5d /1/5/9-/1/6`,
				`@primary /1/6-/2`,
			},
			scans: map[string]string{
				`(a, b) < (3, 4)`: `n1`,
				`a = 3 AND b = 4`: `n2`,
				`(a, b) > (3, 4) AND a < 5`: `n1`,
				`a = 5 AND b < 7`: `n2`,
				`a = 5 AND b = 7`: `n3`,
				`a = 5 AND b = 8`: `n1`,
				`a = 5 AND b > 8`: `n2`,
				`a > 5`: `n1`,
			},
		},
		{
			name: `multi col list partitioning - DEFAULT DEFAULT`,
			schema: `CREATE TABLE %s (a INT, b INT, PRIMARY KEY (a, b)) PARTITION BY LIST (a, b) (
				PARTITION p34 VALUES IN ((3, 4)),
				PARTITION p57 VALUES IN ((5, 7)),
				PARTITION p58 VALUES IN ((5, 8)),
				PARTITION p5d VALUES IN ((5, DEFAULT)),
				PARTITION pd VALUES IN ((DEFAULT, DEFAULT))
			)`,
			configs: []string{`@primary`, `.p34:+n1`, `.p57:+n2`, `.p58:+n3`, `.p5d:+n1`, `.pd:+n2`},
			generatedSpans: []string{
				` .pd /1-/1/3/4`,
				`.p34 /1/3/4-/1/3/5`,
				` .pd /1/3/5-/1/5`,
				`.p5d /1/5-/1/5/7`,
				`.p57 /1/5/7-/1/5/8`,
				`.p58 /1/5/8-/1/5/9`,
				`.p5d /1/5/9-/1/6`,
				` .pd /1/6-/2`,
			},
			scans: map[string]string{
				`(a, b) < (3, 4)`: `n2`,
				`a = 3 AND b = 4`: `n1`,
				`(a, b) > (3, 4) AND a < 5`: `n2`,
				`a = 5 AND b < 7`: `n1`,
				`a = 5 AND b = 7`: `n2`,
				`a = 5 AND b = 8`: `n3`,
				`a = 5 AND b > 8`: `n1`,
				`a > 5`: `n2`,
			},
		},
		{
			// Similar to `multi col list partitioning - DEFAULT DEFAULT` but
			// via subpartitioning instead of multi col.
			name: `multi col list partitioning - DEFAULT DEFAULT subpartitioned`,
			schema: `CREATE TABLE %s (a INT, b INT, PRIMARY KEY (a, b)) PARTITION BY LIST (a) (
				PARTITION p3 VALUES IN (3) PARTITION BY LIST (b) (
					PARTITION p34 VALUES IN (4)
				),
				PARTITION p5 VALUES IN (5) PARTITION BY LIST (b) (
					PARTITION p57 VALUES IN (7),
					PARTITION p58 VALUES IN (8),
					PARTITION p5d VALUES IN (DEFAULT)
				),
				PARTITION pd VALUES IN (DEFAULT)
			)`,
			configs: []string{`@primary`, `.p34:+n1`, `.p57:+n2`, `.p58:+n3`, `.p5d:+n1`, `.pd:+n2`},
			generatedSpans: []string{
				` .pd /1-/1/3/4`,
				`.p34 /1/3/4-/1/3/5`,
				` .pd /1/3/5-/1/5`,
				`.p5d /1/5-/1/5/7`,
				`.p57 /1/5/7-/1/5/8`,
				`.p58 /1/5/8-/1/5/9`,
				`.p5d /1/5/9-/1/6`,
				` .pd /1/6-/2`,
			},
			scans: map[string]string{
				`(a, b) < (3, 4)`: `n2`,
				`a = 3 AND b = 4`: `n1`,
				`(a, b) > (3, 4) AND a < 5`: `n2`,
				`a = 5 AND b < 7`: `n1`,
				`a = 5 AND b = 7`: `n2`,
				`a = 5 AND b = 8`: `n3`,
				`a = 5 AND b > 8`: `n1`,
				`a > 5`: `n2`,
			},
		},

		{
			name: `single col range partitioning`,
			schema: `CREATE TABLE %s (a INT PRIMARY KEY) PARTITION BY RANGE (a) (
				PARTITION p3 VALUES FROM (MINVALUE) TO (3),
				PARTITION p4 VALUES FROM (3) TO (4)
			)`,
			configs: []string{`@primary:+n1`, `.p3:+n2`, `.p4:+n3`},
			generatedSpans: []string{
				` .p3 /1-/1/3`,
				` .p4 /1/3-/1/4`,
				`@primary /1/4-/2`,
			},
			scans: map[string]string{
				`a < 3`: `n2`,
				`a >= 3 AND a < 4`: `n3`,
				`a >= 4`: `n1`,
			},
		},
		{
			// If this test seems confusing, see the note on the multi-col equivalent.
			name: `single col range partitioning - descending`,
			schema: `CREATE TABLE %s (a INT, PRIMARY KEY (a DESC)) PARTITION BY RANGE (a) (
				PARTITION p4 VALUES FROM (MINVALUE) TO (4),
				PARTITION p3 VALUES FROM (4) TO (3),
				PARTITION px VALUES FROM (3) TO (MAXVALUE)
			)`,
			configs: []string{`.p4:+n1`, `.p3:+n2`, `.px:+n3`},
			generatedSpans: []string{
				`.p4 /1-/1/4`,
				`.p3 /1/4-/1/3`,
				`.px /1/3-/2`,
			},
			scans: map[string]string{
				`a > 4`: `n1`,
				`a <= 4 AND a > 3`: `n2`,
				`a <= 3`: `n3`,
			},
		},
		{
			name: `sparse single col range partitioning`,
			schema: `CREATE TABLE %s (a INT PRIMARY KEY) PARTITION BY RANGE (a) (
				PARTITION p1 VALUES FROM (1) TO (2),
				PARTITION p3 VALUES FROM (3) TO (4)
			)`,
			configs: []string{`@primary:+n1`, `.p1:+n2`, `.p3:+n3`},
			generatedSpans: []string{
				`@primary /1-/1/1`,
				` .p1 /1/1-/1/2`,
				`@primary /1/2-/1/3`,
				` .p3 /1/3-/1/4`,
				`@primary /1/4-/2`,
			},
			scans: map[string]string{
				`a < 1`: `n1`,
				`a >= 1 AND a < 2`: `n2`,
				`a >= 2 AND a < 3`: `n1`,
				`a >= 3 AND a < 4`: `n3`,
				`a > 4`: `n1`,
			},
		},
		{
			// Intentionally a little different than `single col range
			// partitioning` for the repartitioning tests.
			name: `single col range partitioning - MAXVALUE`,
			schema: `CREATE TABLE %s (a INT PRIMARY KEY) PARTITION BY RANGE (a) (
				PARTITION p4 VALUES FROM (MINVALUE) TO (4),
				PARTITION p5 VALUES FROM (4) TO (5),
				PARTITION px VALUES FROM (5) TO (MAXVALUE)
			)`,
			configs: []string{`@primary`, `.p4:+n1`, `.p5:+n2`, `.px:+n3`},
			generatedSpans: []string{
				`.p4 /1-/1/4`,
				`.p5 /1/4-/1/5`,
				`.px /1/5-/2`,
			},
			scans: map[string]string{
				`a < 4`: `n1`,
				`a >= 4 AND a < 5`: `n2`,
				`a > 5`: `n3`,
			},
		},
		{
			name: `multi col range partitioning`,
			schema: `CREATE TABLE %s (a INT, b INT, PRIMARY KEY (a, b)) PARTITION BY RANGE (a, b) (
				PARTITION p34 VALUES FROM (MINVALUE, MINVALUE) TO (3, 4),
				PARTITION p56 VALUES FROM (3, 4) TO (5, 6),
				PARTITION p57 VALUES FROM (5, 6) TO (5, 7)
			)`,
			configs: []string{`@primary:+n1`, `.p34:+n2`, `.p56:+n3`, `.p57:+n1`},
			generatedSpans: []string{
				` .p34 /1-/1/3/4`,
				` .p56 /1/3/4-/1/5/6`,
				` .p57 /1/5/6-/1/5/7`,
				`@primary /1/5/7-/2`,
			},
			scans: map[string]string{
				`(a, b) < (3, 4)`: `n2`,
				`(a, b) >= (3, 4) AND (a, b) < (5, 6)`: `n3`,
				`(a, b) >= (5, 6) AND (a, b) < (5, 7)`: `n1`,
				`(a, b) >= (5, 7)`: `n1`,
			},
		},
		{
			// MINVALUE and MAXVALUE are brutally confusing when used with a column
			// stored in descending order. MINVALUE means "the value that sorts before
			// the earliest value in the index", and so in the case of a descending
			// INT column, represents a large *positive* integer, i.e., one greater
			// than the maximum representable integer. Similarly, MAXVALUE represents
			// a large *negative* integer.
			//
			// It's not clear that anything can be done. Switching the meaning of
			// MINVALUE/MAXVALUE for descending columns would be quite confusing in
			// the multi-col case. For example, in the table below, the minimum
			// possible tuple would be (MINVALUE, MAXVALUE, MINVALUE) and the maximum
			// possible would be (MAXVALUE, MINVALUE, MAXVALUE). Neither is exactly
			// intuitive. Consider also that (6, MINVALUE, MINVALUE) would be invalid,
			// as a descending MINVALUE is not equivalent to an ascending MINVALUE.
			// How would we even describe these requirements?
			//
			// Better to let the meaning of MINVALUE/MAXVALUE be consistent
			// everywhere, and document the gotcha thoroughly.
			name: `multi col range partitioning - descending`,
			schema: `CREATE TABLE %s (a INT, b INT, c INT, PRIMARY KEY (a, b DESC, c)) PARTITION BY RANGE (a, b, c) (
				PARTITION p6xx VALUES FROM (MINVALUE, MINVALUE, MINVALUE) TO (6, MAXVALUE, MAXVALUE),
				PARTITION p75n VALUES FROM (7, MINVALUE, MINVALUE) TO (7, 5, MINVALUE),
				PARTITION pxxx VALUES FROM (7, 5, MINVALUE) TO (MAXVALUE, MAXVALUE, MAXVALUE)
			)`,
			configs: []string{`.p6xx:+n1`, `.p75n:+n2`, `.pxxx:+n3`},
			generatedSpans: []string{
				`.p6xx /1-/1/7`,
				`.p75n /1/7-/1/7/5`,
				`.pxxx /1/7/5-/2`,
			},
			scans: map[string]string{
				`a < 7`: `n1`,
				`a = 7 AND b > 5`: `n2`,
				`a > 7 OR (a = 7 AND b <= 5)`: `n3`,
			},
		},
		{
			name: `sparse multi col range partitioning`,
			schema: `CREATE TABLE %s (a INT, b INT, PRIMARY KEY (a, b)) PARTITION BY RANGE (a, b) (
				PARTITION p34 VALUES FROM (1, 2) TO (3, 4),
				PARTITION p78 VALUES FROM (5, 6) TO (7, 8)
			)`,
			configs: []string{`@primary:+n1`, `.p34:+n2`, `.p78:+n3`},
			generatedSpans: []string{
				`@primary /1-/1/1/2`,
				` .p34 /1/1/2-/1/3/4`,
				`@primary /1/3/4-/1/5/6`,
				` .p78 /1/5/6-/1/7/8`,
				`@primary /1/7/8-/2`,
			},
			scans: map[string]string{
				`(a, b) < (1, 2)`: `n1`,
				`(a, b) >= (1, 2) AND (a, b) < (3, 4)`: `n2`,
				`(a, b) >= (3, 4) AND (a, b) < (5, 6)`: `n1`,
				`(a, b) >= (5, 6) AND (a, b) < (7, 8)`: `n3`,
				`(a, b) >= (7, 8)`: `n1`,
			},
		},
		{
			// Intentionally a little different than `multi col range
			// partitioning` for the repartitioning tests.
			name: `multi col range partitioning - MAXVALUE`,
			schema: `CREATE TABLE %s (a INT, b INT, PRIMARY KEY (a, b)) PARTITION BY RANGE (a, b) (
				PARTITION p3n VALUES FROM (MINVALUE, MINVALUE) TO (3, MINVALUE),
				PARTITION p3x VALUES FROM (3, MINVALUE) TO (3, MAXVALUE),
				PARTITION p56 VALUES FROM (3, MAXVALUE) TO (5, 6),
				PARTITION p57 VALUES FROM (5, 6) TO (5, 7)
			)`,
			configs: []string{`@primary:+n1`, `.p3n:+n2`, `.p3x:+n3`, `.p56:+n1`, `.p57:+n2`},
			generatedSpans: []string{
				` .p3n /1-/1/3`,
				` .p3x /1/3-/1/4`,
				` .p56 /1/4-/1/5/6`,
				` .p57 /1/5/6-/1/5/7`,
				`@primary /1/5/7-/2`,
			},
			scans: map[string]string{
				`a < 3`: `n2`,
				`a >= 3 AND a < 4`: `n3`,
				`a >= 4 AND (a, b) < (5, 6)`: `n1`,
				`(a, b) >= (5, 6) AND (a, b) < (5, 7)`: `n2`,
				`(a, b) >= (5, 7)`: `n1`,
			},
		},
		{
			name: `multi col range partitioning - MAXVALUE MAXVALUE`,
			schema: `CREATE TABLE %s (a INT, b INT, PRIMARY KEY (a, b)) PARTITION BY RANGE (a, b) (
				PARTITION p34 VALUES FROM (MINVALUE, MINVALUE) TO (3, 4),
				PARTITION p3x VALUES FROM (3, 4) TO (3, MAXVALUE),
				PARTITION p56 VALUES FROM (3, MAXVALUE) TO (5, 6),
				PARTITION p57 VALUES FROM (5, 6) TO (5, 7),
				PARTITION pxx VALUES FROM (5, 7) TO (MAXVALUE, MAXVALUE)
			)`,
			configs: []string{`@primary`, `.p34:+n1`, `.p3x:+n2`, `.p56:+n3`, `.p57:+n1`, `.pxx:+n2`},
			generatedSpans: []string{
				`.p34 /1-/1/3/4`,
				`.p3x /1/3/4-/1/4`,
				`.p56 /1/4-/1/5/6`,
				`.p57 /1/5/6-/1/5/7`,
				`.pxx /1/5/7-/2`,
			},
			scans: map[string]string{
				`(a, b) < (3, 4)`: `n1`,
				`(a, b) >= (3, 4) AND a < 4`: `n2`,
				`a >= 4 AND (a, b) < (5, 6)`: `n3`,
				`(a, b) >= (5, 6) AND (a, b) < (5, 7)`: `n1`,
				`(a, b) >= (5, 7)`: `n2`,
			},
		},

		{
			name: `list-list partitioning`,
			schema: `CREATE TABLE %s (a INT, b INT, PRIMARY KEY (a, b)) PARTITION BY LIST (a) (
				PARTITION p3 VALUES IN (3) PARTITION BY LIST (b) (
					PARTITION p34 VALUES IN (4)
				),
				PARTITION p5 VALUES IN (5) PARTITION BY LIST (b) (
					PARTITION p56 VALUES IN (6),
					PARTITION p5d VALUES IN (DEFAULT)
				),
				PARTITION pd VALUES IN (DEFAULT)
			)`,
			configs: []string{`@primary:+n1`, `.p3:+n2`, `.p34:+n3`, `.p5:+n1`, `.p56:+n2`, `.p5d:+n3`, `.pd:+n1`},
			generatedSpans: []string{
				` .pd /1-/1/3`,
				` .p3 /1/3-/1/3/4`,
				`.p34 /1/3/4-/1/3/5`,
				` .p3 /1/3/5-/1/4`,
				` .pd /1/4-/1/5`,
				`.p5d /1/5-/1/5/6`,
				`.p56 /1/5/6-/1/5/7`,
				`.p5d /1/5/7-/1/6`,
				` .pd /1/6-/2`,
			},
			scans: map[string]string{
				`a < 3`: `n1`,
				`a = 3 AND b < 4`: `n2`,
				`a = 3 AND b = 4`: `n3`,
				`a = 3 AND b > 4`: `n2`,
				`a > 3 AND a < 5`: `n1`,
				`a = 5 AND b < 6`: `n3`,
				`a = 5 AND b = 6`: `n2`,
				`a = 5 AND b > 6`: `n3`,
				`a > 5`: `n1`,
			},
		},
		{
			name: `list-range partitioning`,
			schema: `CREATE TABLE %s (a INT, b INT, PRIMARY KEY (a, b)) PARTITION BY LIST (a) (
				PARTITION p3 VALUES IN (3) PARTITION BY RANGE (b) (
					PARTITION p34 VALUES FROM (MINVALUE) TO (4)
				),
				PARTITION p5 VALUES IN (5) PARTITION BY RANGE (b) (
					PARTITION p56 VALUES FROM (MINVALUE) TO (6),
					PARTITION p5d VALUES FROM (6) TO (MAXVALUE)
				),
				PARTITION pd VALUES IN (DEFAULT)
			)`,
			configs: []string{`@primary:+n1`, `.p3:+n2`, `.p34:+n3`, `.p5:+n1`, `.p56:+n2`, `.p5d:+n3`, `.pd:+n1`},
			generatedSpans: []string{
				` .pd /1-/1/3`,
				`.p34 /1/3-/1/3/4`,
				` .p3 /1/3/4-/1/4`,
				` .pd /1/4-/1/5`,
				`.p56 /1/5-/1/5/6`,
				`.p5d /1/5/6-/1/6`,
				` .pd /1/6-/2`,
			},
			scans: map[string]string{
				`a < 3`: `n1`,
				`a = 3 AND b < 4`: `n3`,
				`a = 3 AND b >= 4`: `n2`,
				`a > 3 AND a < 5`: `n1`,
				`a = 5 AND b < 6`: `n2`,
				`a = 5 AND b >= 6`: `n3`,
				`a > 5`: `n1`,
			},
		},

		{
			name: `inheritance - index`,
			schema: `CREATE TABLE %s (a INT PRIMARY KEY) PARTITION BY LIST (a) (
				PARTITION pd VALUES IN (DEFAULT)
			)`,
			configs: []string{`@primary`},
			generatedSpans: []string{`@primary /1-/2`},
		},
		{
			name: `inheritance - single col default`,
			schema: `CREATE TABLE %s (a INT PRIMARY KEY) PARTITION BY LIST (a) (
				PARTITION p3 VALUES IN (3),
				PARTITION pd VALUES IN (DEFAULT)
			)`,
			configs: []string{`@primary`, `.pd`},
			generatedSpans: []string{`.pd /1-/2`},
		},
		{
			name: `inheritance - multi col default`,
			schema: `CREATE TABLE %s (a INT, b INT, PRIMARY KEY (a, b)) PARTITION BY LIST (a, b) (
				PARTITION p34 VALUES IN ((3, 4)),
				PARTITION p3d VALUES IN ((3, DEFAULT)),
				PARTITION p56 VALUES IN ((5, 6)),
				PARTITION p5d VALUES IN ((5, DEFAULT))
			)`,
			configs: []string{`@primary`, `.p3d`, `.p56`},
			generatedSpans: []string{
				`@primary /1-/1/3`,
				` .p3d /1/3-/1/4`,
				`@primary /1/4-/1/5/6`,
				` .p56 /1/5/6-/1/5/7`,
				`@primary /1/5/7-/2`,
			},
		},
		{
			name: `inheritance - subpartitioning`,
			schema: `CREATE TABLE %s (a INT, b INT, PRIMARY KEY (a, b)) PARTITION BY LIST (a) (
				PARTITION p3 VALUES IN (3) PARTITION BY LIST (b) (
					PARTITION p34 VALUES IN (4),
					PARTITION p3d VALUES IN (DEFAULT)
				),
				PARTITION p5 VALUES IN (5) PARTITION BY LIST (b) (
					PARTITION p56 VALUES IN (6),
					PARTITION p5d VALUES IN (DEFAULT)
				),
				PARTITION p7 VALUES IN (7) PARTITION BY LIST (b) (
					PARTITION p78 VALUES IN (8),
					PARTITION p7d VALUES IN (DEFAULT)
				)
			)`,
			configs: []string{`@primary`, `.p3d`, `.p56`, `.p7`},
			generatedSpans: []string{
				`@primary /1-/1/3`,
				` .p3d /1/3-/1/4`,
				`@primary /1/4-/1/5/6`,
				` .p56 /1/5/6-/1/5/7`,
				`@primary /1/5/7-/1/7`,
				` .p7 /1/7-/1/8`,
				`@primary /1/8-/2`,
			},
		},

		{
			name: `secondary index - unpartitioned`,
			schema: `CREATE TABLE %s (a INT PRIMARY KEY, b INT, INDEX b_idx (b))`,
		},
		{
			name: `secondary index - list partitioning`,
			schema: `CREATE TABLE %s (a INT PRIMARY KEY, b INT, INDEX b_idx (b) PARTITION BY LIST (b) (
				PARTITION p3 VALUES IN (3),
				PARTITION p4 VALUES IN (4)
			))`,
			configs: []string{`@b_idx:+n1`, `@b_idx.p3:+n2`, `@b_idx.p4:+n3`},
			generatedSpans: []string{
				`@b_idx /2-/2/3`,
				` .p3 /2/3-/2/4`,
				` .p4 /2/4-/2/5`,
				`@b_idx /2/5-/3`,
			},
			scans: map[string]string{`b < 3`: `n1`, `b = 3`: `n2`, `b = 4`: `n3`, `b > 4`: `n1`},
		},
		{
			// Intentionally a little different than `single col list
			// partitioning` for the repartitioning tests.
			name: `secondary index - list partitioning - DEFAULT`,
			schema: `CREATE TABLE %s (a INT PRIMARY KEY, b INT, INDEX b_idx (b) PARTITION BY LIST (b) (
				PARTITION p4 VALUES IN (4),
				PARTITION p5 VALUES IN (5),
				PARTITION pd VALUES IN (DEFAULT)
			))`,
			configs: []string{`@b_idx`, `@b_idx.p4:+n2`, `@b_idx.p5:+n3`, `@b_idx.pd:+n1`},
			generatedSpans: []string{
				`.pd /2-/2/4`,
				`.p4 /2/4-/2/5`,
				`.p5 /2/5-/2/6`,
				`.pd /2/6-/3`,
			},
			scans: map[string]string{`b < 4`: `n1`, `b = 4`: `n2`, `b = 5`: `n3`, `b > 5`: `n1`},
		},
		{
			name: `secondary index - NULL`,
			schema: `CREATE TABLE %s (a INT PRIMARY KEY, b INT, INDEX b_idx (b) PARTITION BY LIST (b) (
				PARTITION pl1 VALUES IN (NULL, 1),
				PARTITION p3 VALUES IN (3)
			))`,
			configs: []string{`@b_idx:+n1`, `@b_idx.pl1:+n2`, `@b_idx.p3:+n3`},
			generatedSpans: []string{
				`@b_idx /2-/2/NULL`,
				` .pl1 /2/NULL-/2/!NULL`,
				`@b_idx /2/!NULL-/2/1`,
				` .pl1 /2/1-/2/2`,
				`@b_idx /2/2-/2/3`,
				` .p3 /2/3-/2/4`,
				`@b_idx /2/4-/3`,
			},
			scans: map[string]string{
				`b = -1`: `n1`,
				`b IS NULL`: `n2`,
				`b IS NULL OR b = 1`: `n2`,
				`b = 3`: `n3`,
			},
		},

		{
			name: `scans`,
			schema: `CREATE TABLE %s (a INT PRIMARY KEY, b INT) PARTITION BY LIST (a) (
				PARTITION p3p5 VALUES IN ((3), (5)),
				PARTITION p4 VALUES IN (4),
				PARTITION pd VALUES IN (DEFAULT)
			)`,
			configs: []string{`@primary:+n1`, `.p3p5:+n2`, `.p4:+n3`, `.pd:+n1`},
			generatedSpans: []string{
				` .pd /1-/1/3`,
				`.p3p5 /1/3-/1/4`,
				` .p4 /1/4-/1/5`,
				`.p3p5 /1/5-/1/6`,
				` .pd /1/6-/2`,
			},
			scans: map[string]string{
				`a < 3`: `n1`,
				`a = 3`: `n2`,
				`a = 4`: `n3`,
				`a = 5`: `n2`,
				`a > 5`: `n1`,

				`a = 3 OR a = 5`: `n2`,
				`a IN ((3), (5))`: `n2`,
				`(a, b) IN ((3, 7))`: `n2`,
				`a IN (3) AND a > 2`: `n2`,
				`a IN (3) AND a < 2`: `n2`,
			},
		},
	}

	const schemaFmt = `CREATE TABLE %%s (a %s PRIMARY KEY) PARTITION BY LIST (a) (PARTITION p VALUES IN (%s))`
	for _, typ := range append(types.Scalar, types.AnyCollatedString) {
		switch typ.Family() {
		case types.JsonFamily, types.GeographyFamily, types.GeometryFamily:
			// Not indexable.
			continue
		case types.CollatedStringFamily:
			typ = types.MakeCollatedString(types.String, *sqlbase.RandCollationLocale(rng))
		}
		datum := sqlbase.RandDatum(rng, typ, false /* nullOk */)
		if datum == tree.DNull {
			// DNull is returned by RandDatum for types.UNKNOWN or if the
			// column type is unimplemented in RandDatum. In either case, the
			// correct thing to do is skip this one.
			continue
		}
		serializedDatum := tree.Serialize(datum)
		// name can be "char" (with quotes), so needs to be escaped.
		escapedName := fmt.Sprintf("%s_table", strings.Replace(typ.String(), "\"", "", -1))
		// schema is used in a fmt.Sprintf to fill in the table name, so we have
		// to escape any stray %s.
		escapedDatum := strings.Replace(serializedDatum, `%`, `%%`, -1)
		test := partitioningTest{
			name: escapedName,
			schema: fmt.Sprintf(schemaFmt, typ.SQLString(), escapedDatum),
			configs: []string{`@primary:+n1`, `.p:+n2`},
			scans: map[string]string{
				fmt.Sprintf(`a < %s`, serializedDatum): `n1`,
				fmt.Sprintf(`a = %s`, serializedDatum): `n2`,
				fmt.Sprintf(`a IN (%s)`, serializedDatum): `n2`,
				fmt.Sprintf(`a > %s`, serializedDatum): `n1`,
			},
		}
		tests = append(tests, test)
	}
	return tests
}

func allRepartitioningTests(partitioningTests []partitioningTest) ([]repartitioningTest, error) {
	tests := []repartitioningTest{
		{
			index: `primary`,
			old: partitioningTest{name: `unpartitioned`},
			new: partitioningTest{name: `unpartitioned`},
		},
		{
			index: `primary`,
			old: partitioningTest{name: `unpartitioned`},
			new: partitioningTest{name: `single col list partitioning`},
		},
		{
			index: `primary`,
			old: partitioningTest{name: `unpartitioned`},
			new: partitioningTest{name: `single col list partitioning - DEFAULT`},
		},
		{
			index: `primary`,
			old: partitioningTest{name: `unpartitioned`},
			new: partitioningTest{name: `single col range partitioning`},
		},
		{
			index: `primary`,
			old: partitioningTest{name: `unpartitioned`},
			new: partitioningTest{name: `single col range partitioning - MAXVALUE`},
		},

		{
			index: `primary`,
			old: partitioningTest{name: `single col list partitioning`},
			new: partitioningTest{name: `single col list partitioning - DEFAULT`},
		},
		{
			index: `primary`,
			old: partitioningTest{name: `single col list partitioning - DEFAULT`},
			new: partitioningTest{name: `single col list partitioning`},
		},
		{
			index: `primary`,
			old: partitioningTest{name: `multi col list partitioning`},
			new: partitioningTest{name: `multi col list partitioning - DEFAULT`},
		},
		{
			index: `primary`,
			old: partitioningTest{name: `multi col list partitioning - DEFAULT`},
			new: partitioningTest{name: `multi col list partitioning`},
		},
		{
			index: `primary`,
			old: partitioningTest{name: `multi col list partitioning - DEFAULT`},
			new: partitioningTest{name: `multi col list partitioning - DEFAULT DEFAULT`},
		},
		{
			index: `primary`,
			old: partitioningTest{name: `multi col list partitioning - DEFAULT DEFAULT`},
			new: partitioningTest{name: `multi col list partitioning - DEFAULT`},
		},

		{
			index: `primary`,
			old: partitioningTest{name: `single col range partitioning`},
			new: partitioningTest{name: `single col range partitioning - MAXVALUE`},
		},
		{
			index: `primary`,
			old: partitioningTest{name: `single col range partitioning - MAXVALUE`},
			new: partitioningTest{name: `single col range partitioning`},
		},
		{
			index: `primary`,
			old: partitioningTest{name: `multi col range partitioning`},
			new: partitioningTest{name: `multi col range partitioning - MAXVALUE`},
		},
		{
			index: `primary`,
			old: partitioningTest{name: `multi col range partitioning - MAXVALUE`},
			new: partitioningTest{name: `multi col range partitioning`},
		},
		{
			index: `primary`,
			old: partitioningTest{name: `multi col range partitioning - MAXVALUE`},
			new: partitioningTest{name: `multi col range partitioning - MAXVALUE MAXVALUE`},
		},
		{
			index: `primary`,
			old: partitioningTest{name: `multi col range partitioning - MAXVALUE MAXVALUE`},
			new: partitioningTest{name: `multi col range partitioning - MAXVALUE`},
		},

		{
			index: `primary`,
			old: partitioningTest{name: `single col list partitioning`},
			new: partitioningTest{name: `single col range partitioning`},
		},
		{
			index: `primary`,
			old: partitioningTest{name: `single col range partitioning`},
			new: partitioningTest{name: `single col list partitioning`},
		},

		// TODO(dan): Once repartitioning is fully implemented, these tests also
		// need to pass with no ccl code.
		{
			index: `primary`,
			old: partitioningTest{name: `single col list partitioning`},
			new: partitioningTest{name: `unpartitioned`},
		},
		{
			index: `primary`,
			old: partitioningTest{name: `single col list partitioning - DEFAULT`},
			new: partitioningTest{name: `unpartitioned`},
		},
		{
			index: `primary`,
			old: partitioningTest{name: `single col range partitioning`},
			new: partitioningTest{name: `unpartitioned`},
		},
		{
			index: `primary`,
			old: partitioningTest{name: `single col range partitioning - MAXVALUE`},
			new: partitioningTest{name: `unpartitioned`},
		},

		{
			index: `b_idx`,
			old: partitioningTest{name: `secondary index - unpartitioned`},
			new: partitioningTest{name: `secondary index - list partitioning`},
		},
		{
			index: `b_idx`,
			old: partitioningTest{name: `secondary index - list partitioning`},
			new: partitioningTest{name: `secondary index - unpartitioned`},
		},
		{
			index: `b_idx`,
			old: partitioningTest{name: `secondary index - list partitioning`},
			new: partitioningTest{name: `secondary index - list partitioning - DEFAULT`},
		},
		{
			index: `b_idx`,
			old: partitioningTest{name: `secondary index - list partitioning - DEFAULT`},
			new: partitioningTest{name: `secondary index - list partitioning`},
		},
	}

	partitioningTestsByName := make(map[string]partitioningTest, len(partitioningTests))
	for _, partitioningTest := range partitioningTests {
		partitioningTestsByName[partitioningTest.name] = partitioningTest
	}
	for i := range tests {
		t, ok := partitioningTestsByName[tests[i].old.name]
		if !ok {
			return nil, errors.Errorf("unknown partitioning test: %s", tests[i].old.name)
		}
		tests[i].old = t
		if err := tests[i].old.parse(); err != nil {
			return nil, err
		}

		t, ok = partitioningTestsByName[tests[i].new.name]
		if !ok {
			return nil, errors.Errorf("unknown partitioning test: %s", tests[i].new.name)
		}
		tests[i].new = t
		if err := tests[i].new.parse(); err != nil {
			return nil, err
		}
	}

	return tests, nil
}

func verifyScansOnNode(
	ctx context.Context, t *testing.T, db *gosql.DB, query string, node string,
) error {
	// TODO(dan): This is a stopgap. At some point we should have a syntax for
	// doing this directly (running a query and getting back the nodes it ran on
	// and attributes/localities of those nodes). Users will also want this to
	// be sure their partitioning is working.
	conn, err := db.Conn(ctx)
	if err != nil {
		t.Fatalf("failed to create conn: %v", err)
	}
	sqlDB := sqlutils.MakeSQLRunner(conn)
	defer func() { _ = conn.Close() }()
	sqlDB.Exec(t, fmt.Sprintf(`SET tracing = on; %s; SET tracing = off`, query))
	rows := sqlDB.Query(t, `SELECT concat(tag, ' ', message) FROM [SHOW TRACE FOR SESSION]`)
	defer rows.Close()
	var scansWrongNode []string
	var traceLines []string
	var traceLine gosql.NullString
	for rows.Next() {
		if err := rows.Scan(&traceLine); err != nil {
			t.Fatal(err)
		}
		traceLines = append(traceLines, traceLine.String)
		if strings.Contains(traceLine.String, "read completed") {
			if strings.Contains(traceLine.String, "SystemCon") || strings.Contains(traceLine.String, "NamespaceTab") {
				// Ignore trace lines for the system config range (abbreviated as
				// "SystemCon" in pretty printing of the range descriptor). A read might
				// be performed to the system config range to update the table lease.
				//
				// Also ignore trace lines for the system.namespace table, which is a
				// system table that resides outside the system config range
				// (abbreviated as "NamespaceTab" in pretty printing of the range
				// descriptor).
				continue
			}
			if !strings.Contains(traceLine.String, node) {
				scansWrongNode = append(scansWrongNode, traceLine.String)
			}
		}
	}
	if len(scansWrongNode) > 0 {
		err := errors.Newf("expected to scan on %s: %s", node, query)
		err = errors.WithDetailf(err, "scans:\n%s", strings.Join(scansWrongNode, "\n"))
		var trace strings.Builder
		for _, traceLine := range traceLines {
			trace.WriteString("\n ")
			trace.WriteString(traceLine)
		}
		err = errors.WithDetailf(err, "trace:%s", trace.String())
		return err
	}
	return nil
}

func setupPartitioningTestCluster(
	ctx context.Context, t testing.TB,
) (*gosql.DB, *sqlutils.SQLRunner, func()) {
	cfg := zonepb.DefaultZoneConfig()
	cfg.NumReplicas = proto.Int32(1)

	tsArgs := func(attr string) base.TestServerArgs {
		return base.TestServerArgs{
			Knobs: base.TestingKnobs{
				Store: &kvserver.StoreTestingKnobs{
					// Disable LBS because when the scan is happening at the rate it's
					// happening below, it's possible that one of the system ranges
					// triggers a split.
					DisableLoadBasedSplitting: true,
				},
				Server: &server.TestingKnobs{
					DefaultZoneConfigOverride: &cfg,
				},
			},
			ScanInterval: 100 * time.Millisecond,
			StoreSpecs: []base.StoreSpec{
				{InMemory: true, Attributes: roachpb.Attributes{Attrs: []string{attr}}},
			},
			UseDatabase: "data",
		}
	}
	tcArgs := base.TestClusterArgs{ServerArgsPerNode: map[int]base.TestServerArgs{
		0: tsArgs("n1"),
		1: tsArgs("n2"),
		2: tsArgs("n3"),
	}}
	tc := testcluster.StartTestCluster(t, 3, tcArgs)

	sqlDB := sqlutils.MakeSQLRunner(tc.Conns[0])
	sqlDB.Exec(t, `CREATE DATABASE data`)

	// Disabling store throttling vastly speeds up rebalancing.
	sqlDB.Exec(t, `SET CLUSTER SETTING server.declined_reservation_timeout = '0s'`)
	sqlDB.Exec(t, `SET CLUSTER SETTING server.failed_reservation_timeout = '0s'`)

	return tc.Conns[0], sqlDB, func() {
		tc.Stopper().Stop(context.Background())
	}
}

func TestInitialPartitioning(t *testing.T) {
	defer leaktest.AfterTest(t)()

	// This test configures many sub-tests and is too slow to run under nightly
	// race stress.
	if testutils.NightlyStress() && util.RaceEnabled {
		t.Skip("too big for nightly stress race")
	}
	if testing.Short() {
		t.Skip("short")
	}

	rng, _ := randutil.NewPseudoRand()
	testCases := allPartitioningTests(rng)

	ctx := context.Background()
	db, sqlDB, cleanup := setupPartitioningTestCluster(ctx, t)
	defer cleanup()

	for _, test := range testCases {
		if len(test.scans) == 0 {
			continue
		}
		t.Run(test.name, func(t *testing.T) {
			if err := test.parse(); err != nil {
				t.Fatalf("%+v", err)
			}
			sqlDB.Exec(t, test.parsed.createStmt)
			sqlDB.Exec(t, test.parsed.zoneConfigStmts)

			testutils.SucceedsSoon(t, test.verifyScansFn(ctx, t, db))
		})
	}
}

func TestSelectPartitionExprs(t *testing.T) {
	defer leaktest.AfterTest(t)()

	// TODO(dan): PartitionExprs for range partitions is waiting on the new
	// range partitioning syntax.
	testData := partitioningTest{
		name: `partition exprs`,
		schema: `CREATE TABLE %s (
			a INT, b INT, c INT, PRIMARY KEY (a, b, c)
		) PARTITION BY LIST (a, b) (
			PARTITION p33p44 VALUES IN ((3, 3), (4, 4)) PARTITION BY LIST (c) (
				PARTITION p335p445 VALUES IN (5),
				PARTITION p33dp44d VALUES IN (DEFAULT)
			),
			PARTITION p6d VALUES IN ((6, DEFAULT)),
			PARTITION pdd VALUES IN ((DEFAULT, DEFAULT))
		)`,
	}
	if err := testData.parse(); err != nil {
		t.Fatalf("%+v", err)
	}

	tests := []struct {
		// partitions is a comma-separated list of input partitions.
		partitions string
		// expr is the expected output.
		expr string
	}{
		{`p33p44`, `((a, b) = (3, 3)) OR ((a, b) = (4, 4))`},
		{`p335p445`, `((a, b, c) = (3, 3, 5)) OR ((a, b, c) = (4, 4, 5))`},
		{`p33dp44d`, `(((a, b) = (3, 3)) AND (NOT ((a, b, c) = (3, 3, 5)))) OR (((a, b) = (4, 4)) AND (NOT ((a, b, c) = (4, 4, 5))))`},
		// NB: See the TODO in the impl for why this next case has some clearly
		// unrelated `!=`s.
		{`p6d`, `((a,) = (6,)) AND (NOT (((a, b) = (3, 3)) OR ((a, b) = (4, 4))))`},
		{`pdd`, `NOT ((((a, b) = (3, 3)) OR ((a, b) = (4, 4))) OR ((a,) = (6,)))`},

		{`p335p445,p6d`, `(((a, b, c) = (3, 3, 5)) OR ((a, b, c) = (4, 4, 5))) OR (((a,) = (6,)) AND (NOT (((a, b) = (3, 3)) OR ((a, b) = (4, 4)))))`},

		// TODO(dan): The expression simplification in this method is all done
		// by our normal SQL expression simplification code. Seems like it could
		// use some targeted work to clean these up. Ideally the following would
		// all simplify to `(a, b) IN ((3, 3), (4, 4))`. Some of them work
		// because for every requested partition, all descendent partitions are
		// omitted, which is an optimization to save a little work with the side
		// benefit of making more of these what we want.
		{`p335p445,p33dp44d`, `(((a, b, c) = (3, 3, 5)) OR ((a, b, c) = (4, 4, 5))) OR ((((a, b) = (3, 3)) AND (NOT ((a, b, c) = (3, 3, 5)))) OR (((a, b) = (4, 4)) AND (NOT ((a, b, c) = (4, 4, 5)))))`},
		{`p33p44,p335p445`, `((a, b) = (3, 3)) OR ((a, b) = (4, 4))`},
		{`p33p44,p335p445,p33dp44d`, `((a, b) = (3, 3)) OR ((a, b) = (4, 4))`},
	}

	evalCtx := &tree.EvalContext{Codec: keys.SystemSQLCodec}
	for _, test := range tests {
		t.Run(test.partitions, func(t *testing.T) {
			var partNames tree.NameList
			for _, p := range strings.Split(test.partitions, `,`) {
				partNames = append(partNames, tree.Name(p))
			}
			expr, err := selectPartitionExprs(evalCtx, testData.parsed.tableDesc, partNames)
			if err != nil {
				t.Fatalf("%+v", err)
			}
			if exprStr := expr.String(); exprStr != test.expr {
				t.Errorf("got\n%s\nexpected\n%s", exprStr, test.expr)
			}
		})
	}
	t.Run("error", func(t *testing.T) {
		partNames := tree.NameList{`p33p44`, `nope`}
		_, err := selectPartitionExprs(evalCtx, testData.parsed.tableDesc, partNames)
		if !testutils.IsError(err, `unknown partition`) {
			t.Errorf(`expected "unknown partition" error got: %+v`, err)
		}
	})
}

func TestRepartitioning(t *testing.T) {
	defer leaktest.AfterTest(t)()

	// This test configures many sub-tests and is too slow to run under nightly
	// race stress.
	if testutils.NightlyStress() && util.RaceEnabled {
		t.Skip()
	}

	rng, _ := randutil.NewPseudoRand()
	testCases, err := allRepartitioningTests(allPartitioningTests(rng))
	if err != nil {
		t.Fatalf("%+v", err)
	}

	ctx := context.Background()
	db, sqlDB, cleanup := setupPartitioningTestCluster(ctx, t)
	defer cleanup()

	for _, test := range testCases {
		t.Run(fmt.Sprintf("%s/%s", test.old.name, test.new.name), func(t *testing.T) {
			sqlDB.Exec(t, `DROP DATABASE IF EXISTS data`)
			sqlDB.Exec(t, `CREATE DATABASE data`)

			{
				if err := test.old.parse(); err != nil {
					t.Fatalf("%+v", err)
				}
				sqlDB.Exec(t, test.old.parsed.createStmt)
				sqlDB.Exec(t, test.old.parsed.zoneConfigStmts)

				testutils.SucceedsSoon(t, test.old.verifyScansFn(ctx, t, db))
			}

			{
				if err := test.new.parse(); err != nil {
					t.Fatalf("%+v", err)
				}
				sqlDB.Exec(t, fmt.Sprintf("ALTER TABLE %s RENAME TO %s", test.old.parsed.tableName, test.new.parsed.tableName))

				testIndex, _, err := test.new.parsed.tableDesc.FindIndexByName(test.index)
				if err != nil {
					t.Fatalf("%+v", err)
				}

				var repartition bytes.Buffer
				if testIndex.ID == test.new.parsed.tableDesc.PrimaryIndex.ID {
					fmt.Fprintf(&repartition, `ALTER TABLE %s `, test.new.parsed.tableName)
				} else {
					fmt.Fprintf(&repartition, `ALTER INDEX %s@%s `, test.new.parsed.tableName, testIndex.Name)
				}
				if testIndex.Partitioning.NumColumns == 0 {
					repartition.WriteString(`PARTITION BY NOTHING`)
				} else {
					if err := sql.ShowCreatePartitioning(
						&sqlbase.DatumAlloc{}, keys.SystemSQLCodec, test.new.parsed.tableDesc, testIndex,
						&testIndex.Partitioning, &repartition, 0 /* indent */, 0, /* colOffset */
					); err != nil {
						t.Fatalf("%+v", err)
					}
				}
				sqlDB.Exec(t, repartition.String())

				// Verify that repartitioning removes zone configs for partitions that
				// have been removed.
				newPartitionNames := map[string]struct{}{}
				for _, name := range test.new.parsed.tableDesc.PartitionNames() {
					newPartitionNames[name] = struct{}{}
				}
				for _, row := range sqlDB.QueryStr(
					t, "SELECT partition_name FROM crdb_internal.zones WHERE partition_name IS NOT NULL") {
					partitionName := row[0]
					if _, ok := newPartitionNames[partitionName]; !ok {
						t.Errorf("zone config for removed partition %q exists after repartitioning", partitionName)
					}
				}

				// NB: Not all old zone configurations are removed. This statement will
				// overwrite any with the same name and the repartitioning removes any
				// for partitions that no longer exist, but there could still be some
				// sitting around (e.g., when a repartitioning preserves a partition but
				// does not apply a new zone config). This is fine.
				sqlDB.Exec(t, test.new.parsed.zoneConfigStmts)
				testutils.SucceedsSoon(t, test.new.verifyScansFn(ctx, t, db))
			}
		})
	}
}

func TestPrimaryKeyChangeZoneConfigs(t *testing.T) {
	defer leaktest.AfterTest(t)()

	ctx := context.Background()
	params, _ := tests.CreateTestServerParams()
	s, sqlDB, kvDB := serverutils.StartServer(t, params)
	defer s.Stopper().Stop(ctx)

	// Write a table with some partitions into the database,
	// and change its primary key.
	if _, err := sqlDB.Exec(`
CREATE DATABASE t;
USE t;
CREATE TABLE t (
  x INT PRIMARY KEY,
  y INT NOT NULL,
  z INT,
  w INT,
  INDEX i1 (z),
  INDEX i2 (w),
  FAMILY (x, y, z, w)
);
ALTER INDEX t@i1 PARTITION BY LIST (z) (
  PARTITION p1 VALUES IN (1)
);
ALTER INDEX t@i2 PARTITION BY LIST (w) (
  PARTITION p2 VALUES IN (3)
);
ALTER PARTITION p1 OF INDEX t@i1 CONFIGURE ZONE USING gc.ttlseconds = 15210;
ALTER PARTITION p2 OF INDEX t@i2 CONFIGURE ZONE USING gc.ttlseconds = 15213;
ALTER TABLE t ALTER PRIMARY KEY USING COLUMNS (y)
`); err != nil {
		t.Fatal(err)
	}

	// Get the zone config corresponding to the table.
	table := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "t")
	kv, err := kvDB.Get(ctx, config.MakeZoneKey(uint32(table.ID)))
	if err != nil {
		t.Fatal(err)
	}
	var zone zonepb.ZoneConfig
	if err := kv.ValueProto(&zone); err != nil {
		t.Fatal(err)
	}

	// Our subzones should be spans prefixed with dropped copy of i1,
	// dropped copy of i2, new copy of i1, and new copy of i2.
	// These have IDs 2, 3, 6 and 7 respectively.
	expectedSpans := []roachpb.Key{
		table.IndexSpan(keys.SystemSQLCodec, 2 /* indexID */).Key,
		table.IndexSpan(keys.SystemSQLCodec, 3 /* indexID */).Key,
		table.IndexSpan(keys.SystemSQLCodec, 6 /* indexID */).Key,
		table.IndexSpan(keys.SystemSQLCodec, 7 /* indexID */).Key,
	}
	if len(zone.SubzoneSpans) != len(expectedSpans) {
		t.Fatalf("expected subzones to have length %d", len(expectedSpans))
	}

	// Subzone spans have the table prefix omitted.
	prefix := keys.SystemSQLCodec.TablePrefix(uint32(table.ID))
	for i := range expectedSpans {
		expected := bytes.TrimPrefix(expectedSpans[i], prefix)
		if !bytes.HasPrefix(zone.SubzoneSpans[i].Key, expected) {
			t.Fatalf(
				"expected span to have prefix %s but found %s",
				expected,
				zone.SubzoneSpans[i].Key,
			)
		}
	}
}

func TestRemovePartitioningExpiredLicense(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer utilccl.TestingEnableEnterprise()()

	ctx := context.Background()
	s, sqlDBRaw, _ := serverutils.StartServer(t, base.TestServerArgs{
		UseDatabase: "d",
	})
	defer s.Stopper().Stop(ctx)

	// Create a partitioned table and index.
	sqlDB := sqlutils.MakeSQLRunner(sqlDBRaw)
	sqlDB.Exec(t, `CREATE DATABASE d`)
	sqlDB.Exec(t, `CREATE TABLE t (a INT PRIMARY KEY) PARTITION BY LIST (a) (
		PARTITION p1 VALUES IN (1)
	)`)
	sqlDB.Exec(t, `CREATE INDEX i ON t (a) PARTITION BY RANGE (a) (
		PARTITION p34 VALUES FROM (3) TO (4)
	)`)
	sqlDB.Exec(t, `ALTER PARTITION p1 OF TABLE t CONFIGURE ZONE USING DEFAULT`)
	sqlDB.Exec(t, `ALTER PARTITION p34 OF INDEX t@i CONFIGURE ZONE USING DEFAULT`)
	sqlDB.Exec(t, `ALTER INDEX t@primary CONFIGURE ZONE USING DEFAULT`)
	sqlDB.Exec(t, `ALTER INDEX t@i CONFIGURE ZONE USING DEFAULT`)

	// Remove the enterprise license.
	defer utilccl.TestingDisableEnterprise()()

	const partitionErr = "use of partitions requires an enterprise license"
	const zoneErr = "use of replication zones on indexes or partitions requires an enterprise license"
	expectErr := func(q string, expErr string) {
		t.Helper()
		sqlDB.ExpectErr(t, expErr, q)
	}

	// Partitions and zone configs cannot be modified without a valid license.
	expectErr(`ALTER TABLE t PARTITION BY LIST (a) (PARTITION p2 VALUES IN (2))`, partitionErr)
	expectErr(`ALTER INDEX t@i PARTITION BY RANGE (a) (PARTITION p45 VALUES FROM (4) TO (5))`, partitionErr)
	expectErr(`ALTER PARTITION p1 OF TABLE t CONFIGURE ZONE USING DEFAULT`, zoneErr)
	expectErr(`ALTER PARTITION p34 OF INDEX t@i CONFIGURE ZONE USING DEFAULT`, zoneErr)
	expectErr(`ALTER INDEX t@primary CONFIGURE ZONE USING DEFAULT`, zoneErr)
	expectErr(`ALTER INDEX t@i CONFIGURE ZONE USING DEFAULT`, zoneErr)

	// But they can be removed.
	sqlDB.Exec(t, `ALTER TABLE t PARTITION BY NOTHING`)
	sqlDB.Exec(t, `ALTER INDEX t@i PARTITION BY NOTHING`)
	sqlDB.Exec(t, `ALTER INDEX t@primary CONFIGURE ZONE DISCARD`)
	sqlDB.Exec(t, `ALTER INDEX t@i CONFIGURE ZONE DISCARD`)

	// Once removed, they cannot be added back.
	expectErr(`ALTER TABLE t PARTITION BY LIST (a) (PARTITION p2 VALUES IN (2))`, partitionErr)
	expectErr(`ALTER INDEX t@i PARTITION BY RANGE (a) (PARTITION p45 VALUES FROM (4) TO (5))`, partitionErr)
	expectErr(`ALTER INDEX t@primary CONFIGURE ZONE USING DEFAULT`, zoneErr)
	expectErr(`ALTER INDEX t@i CONFIGURE ZONE USING DEFAULT`, zoneErr)
}