github.com/cockroachdb/cockroach@v20.2.0-alpha.1+incompatible/pkg/sql/sqlbase/table_test.go

// Copyright 2015 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

package sqlbase

import (
	"bytes"
	"context"
	"fmt"
	"math"
	"reflect"
	"strconv"
	"testing"
	"time"

	"github.com/cockroachdb/cockroach/pkg/base"
	"github.com/cockroachdb/cockroach/pkg/keys"
	"github.com/cockroachdb/cockroach/pkg/kv"
	"github.com/cockroachdb/cockroach/pkg/roachpb"
	"github.com/cockroachdb/cockroach/pkg/settings/cluster"
	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
	"github.com/cockroachdb/cockroach/pkg/sql/types"
	"github.com/cockroachdb/cockroach/pkg/testutils"
	"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
	"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
	"github.com/cockroachdb/cockroach/pkg/util/encoding"
	"github.com/cockroachdb/cockroach/pkg/util/leaktest"
	"github.com/cockroachdb/cockroach/pkg/util/randutil"
	"github.com/cockroachdb/cockroach/pkg/util/timeofday"
	"github.com/cockroachdb/cockroach/pkg/util/timeutil"
	"github.com/cockroachdb/cockroach/pkg/util/timeutil/pgdate"
	"github.com/cockroachdb/errors"
)

type indexKeyTest struct {
	tableID              ID
	primaryInterleaves   []ID
	secondaryInterleaves []ID
	primaryValues        []tree.Datum // len must be at least primaryInterleaveComponents+1
	secondaryValues      []tree.Datum // len must be at least secondaryInterleaveComponents+1
}

func makeTableDescForTest(test indexKeyTest) (TableDescriptor, map[ColumnID]int) {
	primaryColumnIDs := make([]ColumnID, len(test.primaryValues))
	secondaryColumnIDs := make([]ColumnID, len(test.secondaryValues))
	columns := make([]ColumnDescriptor, len(test.primaryValues)+len(test.secondaryValues))
	colMap := make(map[ColumnID]int, len(test.secondaryValues))
	for i := range columns {
		columns[i] = ColumnDescriptor{
			ID:   ColumnID(i + 1),
			Type: types.Int,
		}
		colMap[columns[i].ID] = i
		if i < len(test.primaryValues) {
			primaryColumnIDs[i] = columns[i].ID
		} else {
			secondaryColumnIDs[i-len(test.primaryValues)] = columns[i].ID

		}
	}

	makeInterleave := func(indexID IndexID, ancestorTableIDs []ID) InterleaveDescriptor {
		var interleave InterleaveDescriptor
		interleave.Ancestors = make([]InterleaveDescriptor_Ancestor, len(ancestorTableIDs))
		for j, ancestorTableID := range ancestorTableIDs {
			interleave.Ancestors[j] = InterleaveDescriptor_Ancestor{
				TableID:         ancestorTableID,
				IndexID:         1,
				SharedPrefixLen: 1,
			}
		}
		return interleave
	}

	tableDesc := TableDescriptor{
		ID:      test.tableID,
		Columns: columns,
		PrimaryIndex: IndexDescriptor{
			ID:               1,
			ColumnIDs:        primaryColumnIDs,
			ColumnDirections: make([]IndexDescriptor_Direction, len(primaryColumnIDs)),
			Interleave:       makeInterleave(1, test.primaryInterleaves),
		},
		Indexes: []IndexDescriptor{{
			ID:               2,
			ColumnIDs:        secondaryColumnIDs,
			ExtraColumnIDs:   primaryColumnIDs,
			Unique:           true,
			ColumnDirections: make([]IndexDescriptor_Direction, len(secondaryColumnIDs)),
			Interleave:       makeInterleave(2, test.secondaryInterleaves),
		}},
	}

	return tableDesc, colMap
}

func decodeIndex(
	codec keys.SQLCodec, tableDesc *TableDescriptor, index *IndexDescriptor, key []byte,
) ([]tree.Datum, error) {
	types, err := GetColumnTypes(tableDesc, index.ColumnIDs)
	if err != nil {
		return nil, err
	}
	values := make([]EncDatum, len(index.ColumnIDs))
	colDirs := index.ColumnDirections
	_, ok, _, err := DecodeIndexKey(codec, tableDesc, index, types, values, colDirs, key)
	if err != nil {
		return nil, err
	}
	if !ok {
		return nil, errors.Errorf("key did not match descriptor")
	}

	decodedValues := make([]tree.Datum, len(values))
	var da DatumAlloc
	for i, value := range values {
		err := value.EnsureDecoded(types[i], &da)
		if err != nil {
			return nil, err
		}
		decodedValues[i] = value.Datum
	}

	return decodedValues, nil
}

func TestIndexKey(t *testing.T) {
	rng, _ := randutil.NewPseudoRand()
	var a DatumAlloc

	tests := []indexKeyTest{
		{50, nil, nil,
			[]tree.Datum{tree.NewDInt(10)},
			[]tree.Datum{tree.NewDInt(20)},
		},
		{50, []ID{100}, nil,
			[]tree.Datum{tree.NewDInt(10), tree.NewDInt(11)},
			[]tree.Datum{tree.NewDInt(20)},
		},
		{50, []ID{100, 200}, nil,
			[]tree.Datum{tree.NewDInt(10), tree.NewDInt(11), tree.NewDInt(12)},
			[]tree.Datum{tree.NewDInt(20)},
		},
		{50, nil, []ID{100},
			[]tree.Datum{tree.NewDInt(10)},
			[]tree.Datum{tree.NewDInt(20), tree.NewDInt(21)},
		},
		{50, []ID{100}, []ID{100},
			[]tree.Datum{tree.NewDInt(10), tree.NewDInt(11)},
			[]tree.Datum{tree.NewDInt(20), tree.NewDInt(21)},
		},
		{50, []ID{100}, []ID{200},
			[]tree.Datum{tree.NewDInt(10), tree.NewDInt(11)},
			[]tree.Datum{tree.NewDInt(20), tree.NewDInt(21)},
		},
		{50, []ID{100, 200}, []ID{100, 300},
			[]tree.Datum{tree.NewDInt(10), tree.NewDInt(11), tree.NewDInt(12)},
			[]tree.Datum{tree.NewDInt(20), tree.NewDInt(21), tree.NewDInt(22)},
		},
	}

	for i := 0; i < 1000; i++ {
		var t indexKeyTest

		t.primaryInterleaves = make([]ID, rng.Intn(10))
		for j := range t.primaryInterleaves {
			t.primaryInterleaves[j] = ID(1 + rng.Intn(10))
		}
		valuesLen := randutil.RandIntInRange(rng, len(t.primaryInterleaves)+1, len(t.primaryInterleaves)+10)
		t.primaryValues = make([]tree.Datum, valuesLen)
		for j := range t.primaryValues {
			t.primaryValues[j] = RandDatum(rng, types.Int, true)
		}

		t.secondaryInterleaves = make([]ID, rng.Intn(10))
		for j := range t.secondaryInterleaves {
			t.secondaryInterleaves[j] = ID(1 + rng.Intn(10))
		}
		valuesLen = randutil.RandIntInRange(rng, len(t.secondaryInterleaves)+1, len(t.secondaryInterleaves)+10)
		t.secondaryValues = make([]tree.Datum, valuesLen)
		for j := range t.secondaryValues {
			t.secondaryValues[j] = RandDatum(rng, types.Int, true)
		}

		tests = append(tests, t)
	}

	for i, test := range tests {
		evalCtx := tree.NewTestingEvalContext(cluster.MakeTestingClusterSettings())
		defer evalCtx.Stop(context.Background())
		tableDesc, colMap := makeTableDescForTest(test)
		// Add the default family to each test, since secondary indexes support column families.
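		// Putting all columns in a single family means EncodeSecondaryIndex should
		// emit exactly one entry per row, which is asserted below.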
		var (
			colNames []string
			colIDs   ColumnIDs
		)
		for _, c := range tableDesc.Columns {
			colNames = append(colNames, c.Name)
			colIDs = append(colIDs, c.ID)
		}
		tableDesc.Families = []ColumnFamilyDescriptor{{
			Name:            "defaultFamily",
			ID:              0,
			ColumnNames:     colNames,
			ColumnIDs:       colIDs,
			DefaultColumnID: colIDs[0],
		}}

		testValues := append(test.primaryValues, test.secondaryValues...)

		codec := keys.SystemSQLCodec
		primaryKeyPrefix := MakeIndexKeyPrefix(codec, &tableDesc, tableDesc.PrimaryIndex.ID)
		primaryKey, _, err := EncodeIndexKey(
			&tableDesc, &tableDesc.PrimaryIndex, colMap, testValues, primaryKeyPrefix)
		if err != nil {
			t.Fatal(err)
		}
		primaryValue := roachpb.MakeValueFromBytes(nil)
		primaryIndexKV := kv.KeyValue{Key: primaryKey, Value: &primaryValue}

		secondaryIndexEntry, err := EncodeSecondaryIndex(
			codec, &tableDesc, &tableDesc.Indexes[0], colMap, testValues, true /* includeEmpty */)
		if len(secondaryIndexEntry) != 1 {
			t.Fatalf("expected 1 index entry, got %d. got %#v", len(secondaryIndexEntry), secondaryIndexEntry)
		}
		if err != nil {
			t.Fatal(err)
		}
		secondaryIndexKV := kv.KeyValue{
			Key:   secondaryIndexEntry[0].Key,
			Value: &secondaryIndexEntry[0].Value,
		}

		checkEntry := func(index *IndexDescriptor, entry kv.KeyValue) {
			values, err := decodeIndex(codec, &tableDesc, index, entry.Key)
			if err != nil {
				t.Fatal(err)
			}

			for j, value := range values {
				testValue := testValues[colMap[index.ColumnIDs[j]]]
				if value.Compare(evalCtx, testValue) != 0 {
					t.Fatalf("%d: value %d got %q but expected %q", i, j, value, testValue)
				}
			}

			indexID, _, err := DecodeIndexKeyPrefix(codec, &tableDesc, entry.Key)
			if err != nil {
				t.Fatal(err)
			}
			if indexID != index.ID {
				t.Errorf("%d", i)
			}

			extracted, err := ExtractIndexKey(&a, codec, &tableDesc, entry)
			if err != nil {
				t.Fatal(err)
			}
			if !bytes.Equal(extracted, primaryKey) {
				t.Errorf("%d got %s <%x>, but expected %s <%x>", i, extracted, []byte(extracted), roachpb.Key(primaryKey), primaryKey)
			}
		}

		checkEntry(&tableDesc.PrimaryIndex, primaryIndexKV)
		checkEntry(&tableDesc.Indexes[0], secondaryIndexKV)
	}
}

type arrayEncodingTest struct {
	name     string
	datum    tree.DArray
	encoding []byte
}

func TestArrayEncoding(t *testing.T) {
	tests := []arrayEncodingTest{
		{
			"empty int array",
			tree.DArray{
				ParamTyp: types.Int,
				Array:    tree.Datums{},
			},
			[]byte{1, 3, 0},
		}, {
			"single int array",
			tree.DArray{
				ParamTyp: types.Int,
				Array:    tree.Datums{tree.NewDInt(1)},
			},
			[]byte{1, 3, 1, 2},
		}, {
			"multiple int array",
			tree.DArray{
				ParamTyp: types.Int,
				Array:    tree.Datums{tree.NewDInt(1), tree.NewDInt(2), tree.NewDInt(3)},
			},
			[]byte{1, 3, 3, 2, 4, 6},
		}, {
			"string array",
			tree.DArray{
				ParamTyp: types.String,
				Array:    tree.Datums{tree.NewDString("foo"), tree.NewDString("bar"), tree.NewDString("baz")},
			},
			[]byte{1, 6, 3, 3, 102, 111, 111, 3, 98, 97, 114, 3, 98, 97, 122},
		}, {
			"name array",
			tree.DArray{
				ParamTyp: types.Name,
				Array:    tree.Datums{tree.NewDName("foo"), tree.NewDName("bar"), tree.NewDName("baz")},
			},
			[]byte{1, 6, 3, 3, 102, 111, 111, 3, 98, 97, 114, 3, 98, 97, 122},
		},
		{
			"bool array",
			tree.DArray{
				ParamTyp: types.Bool,
				Array:    tree.Datums{tree.MakeDBool(true), tree.MakeDBool(false)},
			},
			[]byte{1, 10, 2, 10, 11},
		}, {
			"array containing a single null",
			tree.DArray{
				ParamTyp: types.Int,
				Array:    tree.Datums{tree.DNull},
				HasNulls: true,
			},
			[]byte{17, 3, 1, 1},
		}, {
			"array containing multiple nulls",
			tree.DArray{
				ParamTyp: types.Int,
				Array:    tree.Datums{tree.NewDInt(1), tree.DNull, tree.DNull},
				HasNulls: true,
			},
			[]byte{17, 3, 3, 6, 2},
		}, {
			"array whose NULL bitmap spans exactly one byte",
			tree.DArray{
				ParamTyp: types.Int,
				Array: tree.Datums{
					tree.NewDInt(1), tree.DNull, tree.DNull, tree.NewDInt(2), tree.NewDInt(3),
					tree.NewDInt(4), tree.NewDInt(5), tree.NewDInt(6),
				},
				HasNulls: true,
			},
			[]byte{17, 3, 8, 6, 2, 4, 6, 8, 10, 12},
		}, {
			"array whose NULL bitmap spans more than one byte",
			tree.DArray{
				ParamTyp: types.Int,
				Array: tree.Datums{
					tree.NewDInt(1), tree.DNull, tree.DNull, tree.NewDInt(2), tree.NewDInt(3),
					tree.NewDInt(4), tree.NewDInt(5), tree.NewDInt(6), tree.DNull,
				},
				HasNulls: true,
			},
			[]byte{17, 3, 9, 6, 1, 2, 4, 6, 8, 10, 12},
		},
	}

	for _, test := range tests {
		t.Run("encode "+test.name, func(t *testing.T) {
			enc, err := encodeArray(&test.datum, nil)
			if err != nil {
				t.Fatal(err)
			}
			if !bytes.Equal(enc, test.encoding) {
				t.Fatalf("expected %s to encode to %v, got %v", test.datum.String(), test.encoding, enc)
			}
		})

		t.Run("decode "+test.name, func(t *testing.T) {
			enc := make([]byte, 0)
			enc = append(enc, byte(len(test.encoding)))
			enc = append(enc, test.encoding...)
			d, _, err := decodeArray(&DatumAlloc{}, test.datum.ParamTyp, enc)
			hasNulls := d.(*tree.DArray).HasNulls
			if test.datum.HasNulls != hasNulls {
				t.Fatalf("expected %v to have HasNulls=%t, got %t", enc, test.datum.HasNulls, hasNulls)
			}
			if err != nil {
				t.Fatal(err)
			}
			evalContext := tree.NewTestingEvalContext(cluster.MakeTestingClusterSettings())
			if d.Compare(evalContext, &test.datum) != 0 {
				t.Fatalf("expected %v to decode to %s, got %s", enc, test.datum.String(), d.String())
			}
		})
	}
}

func BenchmarkArrayEncoding(b *testing.B) {
	ary := tree.DArray{ParamTyp: types.Int, Array: tree.Datums{}}
	for i := 0; i < 10000; i++ {
		_ = ary.Append(tree.NewDInt(1))
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, _ = encodeArray(&ary, nil)
	}
}

func TestMarshalColumnValue(t *testing.T) {
	defer leaktest.AfterTest(t)()

	tests := []struct {
		typ   *types.T
		datum tree.Datum
		exp   roachpb.Value
	}{
		{
			typ:   types.Bool,
			datum: tree.MakeDBool(true),
			exp:   func() (v roachpb.Value) { v.SetBool(true); return }(),
		},
		{
			typ:   types.Bool,
			datum: tree.MakeDBool(false),
			exp:   func() (v roachpb.Value) { v.SetBool(false); return }(),
		},
		{
			typ:   types.Int,
			datum: tree.NewDInt(314159),
			exp:   func() (v roachpb.Value) { v.SetInt(314159); return }(),
		},
		{
			typ:   types.Float,
			datum: tree.NewDFloat(3.14159),
			exp:   func() (v roachpb.Value) { v.SetFloat(3.14159); return }(),
		},
		{
			typ: types.Decimal,
			datum: func() (v tree.Datum) {
				v, err := tree.ParseDDecimal("1234567890.123456890")
				if err != nil {
					t.Fatalf("Unexpected error while creating expected value: %s", err)
				}
				return
			}(),
			exp: func() (v roachpb.Value) {
				dDecimal, err := tree.ParseDDecimal("1234567890.123456890")
				if err != nil {
					t.Fatalf("Unexpected error while creating expected value: %s", err)
				}
				err = v.SetDecimal(&dDecimal.Decimal)
				if err != nil {
					t.Fatalf("Unexpected error while creating expected value: %s", err)
				}
				return
			}(),
		},
		{
			typ:   types.Date,
			datum: tree.NewDDate(pgdate.MakeCompatibleDateFromDisk(314159)),
			exp:   func() (v roachpb.Value) { v.SetInt(314159); return }(),
		},
		{
			typ:   types.Date,
			datum: tree.NewDDate(pgdate.MakeCompatibleDateFromDisk(math.MinInt64)),
			exp:   func() (v roachpb.Value) { v.SetInt(math.MinInt64); return }(),
		},
		{
			typ:   types.Date,
			datum: tree.NewDDate(pgdate.MakeCompatibleDateFromDisk(math.MaxInt64)),
			exp:   func() (v roachpb.Value) { v.SetInt(math.MaxInt64); return }(),
		},
		{
			typ:   types.Time,
			datum: tree.MakeDTime(timeofday.FromInt(314159)),
			exp:   func() (v roachpb.Value) { v.SetInt(314159); return }(),
		},
		{
			typ:   types.Timestamp,
			datum: tree.MustMakeDTimestamp(timeutil.Unix(314159, 1000), time.Microsecond),
			exp:   func() (v roachpb.Value) { v.SetTime(timeutil.Unix(314159, 1000)); return }(),
		},
		{
			typ:   types.TimestampTZ,
			datum: tree.MustMakeDTimestampTZ(timeutil.Unix(314159, 1000), time.Microsecond),
			exp:   func() (v roachpb.Value) { v.SetTime(timeutil.Unix(314159, 1000)); return }(),
		},
		{
			typ:   types.String,
			datum: tree.NewDString("testing123"),
			exp:   func() (v roachpb.Value) { v.SetString("testing123"); return }(),
		},
		{
			typ:   types.Name,
			datum: tree.NewDName("testingname123"),
			exp:   func() (v roachpb.Value) { v.SetString("testingname123"); return }(),
		},
		{
			typ:   types.Bytes,
			datum: tree.NewDBytes(tree.DBytes([]byte{0x31, 0x41, 0x59})),
			exp:   func() (v roachpb.Value) { v.SetBytes([]byte{0x31, 0x41, 0x59}); return }(),
		},
		{
			typ: types.Uuid,
			datum: func() (v tree.Datum) {
				v, err := tree.ParseDUuidFromString("63616665-6630-3064-6465-616462656562")
				if err != nil {
					t.Fatalf("Unexpected error while creating expected value: %s", err)
				}
				return
			}(),
			exp: func() (v roachpb.Value) {
				dUUID, err := tree.ParseDUuidFromString("63616665-6630-3064-6465-616462656562")
				if err != nil {
					t.Fatalf("Unexpected error while creating expected value: %s", err)
				}
				v.SetBytes(dUUID.GetBytes())
				return
			}(),
		},
		{
			typ: types.INet,
			datum: func() (v tree.Datum) {
				v, err := tree.ParseDIPAddrFromINetString("192.168.0.1")
				if err != nil {
					t.Fatalf("Unexpected error while creating expected value: %s", err)
				}
				return
			}(),
			exp: func() (v roachpb.Value) {
				ipAddr, err := tree.ParseDIPAddrFromINetString("192.168.0.1")
				if err != nil {
					t.Fatalf("Unexpected error while creating expected value: %s", err)
				}
				data := ipAddr.ToBuffer(nil)
				v.SetBytes(data)
				return
			}(),
		},
	}

	for i, testCase := range tests {
		typ := testCase.typ
		col := ColumnDescriptor{ID: ColumnID(typ.Family() + 1), Type: typ}

		if actual, err := MarshalColumnValue(&col, testCase.datum); err != nil {
			t.Errorf("%d: unexpected error with column type %v: %v", i, typ, err)
		} else if !reflect.DeepEqual(actual, testCase.exp) {
			t.Errorf("%d: MarshalColumnValue() got %v, expected %v", i, actual, testCase.exp)
		}
	}
}

type interleaveTableArgs struct {
	indexKeyArgs indexKeyTest
	values       []tree.Datum
}

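// interleaveInfo describes one table in a synthetic interleave hierarchy: its
// table ID, the primary-key datums used to build its index keys, the
// equivalence signature filled in by equivSignatures, and its interleaved
// children.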
type interleaveInfo struct {
	tableID  uint64
	values   []tree.Datum
	equivSig []byte
	children map[string]*interleaveInfo
}

func createHierarchy() map[string]*interleaveInfo {
	return map[string]*interleaveInfo{
		"t1": {
			tableID: 50,
			values:  []tree.Datum{tree.NewDInt(10)},
			children: map[string]*interleaveInfo{
				"t2": {
					tableID: 100,
					values:  []tree.Datum{tree.NewDInt(10), tree.NewDInt(15)},
				},
				"t3": {
					tableID: 150,
					values:  []tree.Datum{tree.NewDInt(10), tree.NewDInt(20)},
					children: map[string]*interleaveInfo{
						"t4": {
							tableID: 20,
							values:  []tree.Datum{tree.NewDInt(10), tree.NewDInt(30)},
						},
					},
				},
			},
		},
	}
}

type equivSigTestCases struct {
	name     string
	table    interleaveTableArgs
	expected [][]byte
}

func createEquivTCs(hierarchy map[string]*interleaveInfo) []equivSigTestCases {
	return []equivSigTestCases{
		{
			name: "NoAncestors",
			table: interleaveTableArgs{
				indexKeyArgs: indexKeyTest{tableID: 50},
				values:       []tree.Datum{tree.NewDInt(10)},
			},
			expected: [][]byte{hierarchy["t1"].equivSig},
		},

		{
			name: "OneAncestor",
			table: interleaveTableArgs{
				indexKeyArgs: indexKeyTest{tableID: 100, primaryInterleaves: []ID{50}},
				values:       []tree.Datum{tree.NewDInt(10), tree.NewDInt(20)},
			},
			expected: [][]byte{hierarchy["t1"].equivSig, hierarchy["t1"].children["t2"].equivSig},
		},

		{
			name: "TwoAncestors",
			table: interleaveTableArgs{
				indexKeyArgs: indexKeyTest{tableID: 20, primaryInterleaves: []ID{50, 150}},
				values:       []tree.Datum{tree.NewDInt(10), tree.NewDInt(20), tree.NewDInt(30)},
			},
			expected: [][]byte{hierarchy["t1"].equivSig, hierarchy["t1"].children["t3"].equivSig, hierarchy["t1"].children["t3"].children["t4"].equivSig},
		},
	}
}

// equivSignatures annotates the hierarchy with the equivalence signatures
// for each table and returns an in-order depth-first traversal of the
// equivalence signatures.
func equivSignatures(
	hierarchy map[string]*interleaveInfo, parent []byte, signatures [][]byte,
) [][]byte {
	for _, info := range hierarchy {
		// Reset the reference to the parent for every child.
		curParent := parent
		curParent = encoding.EncodeUvarintAscending(curParent, info.tableID)
		// Primary ID is always 1
		curParent = encoding.EncodeUvarintAscending(curParent, 1)
		info.equivSig = make([]byte, len(curParent))
		copy(info.equivSig, curParent)
		signatures = append(signatures, info.equivSig)
		if len(info.children) > 0 {
			curParent = encoding.EncodeInterleavedSentinel(curParent)
			signatures = equivSignatures(info.children, curParent, signatures)
		}
	}
	return signatures
}

func TestIndexKeyEquivSignature(t *testing.T) {
	hierarchy := createHierarchy()
	hierarchySigs := equivSignatures(hierarchy, nil /*parent*/, nil /*signatures*/)
	// validEquivSigs is necessary for IndexKeyEquivSignatures.
	validEquivSigs := make(map[string]int)
	for i, sig := range hierarchySigs {
		validEquivSigs[string(sig)] = i
	}

	// Required buffers when extracting the index key's equivalence signature.
	var keySigBuf, keyRestBuf []byte

	for _, tc := range createEquivTCs(hierarchy) {
		t.Run(tc.name, func(t *testing.T) {
			// We need to initialize this for makeTableDescForTest.
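			// (makeTableDescForTest derives the primary index columns from primaryValues.)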
			tc.table.indexKeyArgs.primaryValues = tc.table.values
			// Setup descriptors and form an index key.
			desc, colMap := makeTableDescForTest(tc.table.indexKeyArgs)
			primaryKeyPrefix := MakeIndexKeyPrefix(keys.SystemSQLCodec, &desc, desc.PrimaryIndex.ID)
			primaryKey, _, err := EncodeIndexKey(
				&desc, &desc.PrimaryIndex, colMap, tc.table.values, primaryKeyPrefix)
			if err != nil {
				t.Fatal(err)
			}

			tableIdx, restKey, match, err := IndexKeyEquivSignature(primaryKey, validEquivSigs, keySigBuf, keyRestBuf)
			if err != nil {
				t.Fatal(err)
			}
			if !match {
				t.Fatalf("expected to extract equivalence signature from index key, instead false returned")
			}

			tableSig := tc.expected[len(tc.expected)-1]
			expectedTableIdx := validEquivSigs[string(tableSig)]
			if expectedTableIdx != tableIdx {
				t.Fatalf("table index returned does not match table index from validEquivSigs.\nexpected %d\nactual %d", expectedTableIdx, tableIdx)
			}

			// Column values should be at the beginning of the
			// remaining bytes of the key.
			colVals, null, err := EncodeColumns(desc.PrimaryIndex.ColumnIDs, desc.PrimaryIndex.ColumnDirections, colMap, tc.table.values, nil /*key*/)
			if err != nil {
				t.Fatal(err)
			}
			if null {
				t.Fatalf("unexpected null values when encoding expected column values")
			}

			if !bytes.Equal(colVals, restKey[:len(colVals)]) {
				t.Fatalf("missing column values from rest of key.\nexpected %v\nactual %v", colVals, restKey[:len(colVals)])
			}

			// The remaining bytes of the key should be the same
			// length as the primary key minus the equivalence
			// signature bytes.
			if len(primaryKey)-len(tableSig) != len(restKey) {
				t.Fatalf("unexpected rest of key length, expected %d, actual %d", len(primaryKey)-len(tableSig), len(restKey))
			}
		})
	}
}

// TestTableEquivSignatures verifies that TableEquivSignatures returns a slice
// of slice references to a table's interleave ancestors' equivalence
// signatures.
func TestTableEquivSignatures(t *testing.T) {
	hierarchy := createHierarchy()
	equivSignatures(hierarchy, nil /*parent*/, nil /*signatures*/)

	for _, tc := range createEquivTCs(hierarchy) {
		t.Run(tc.name, func(t *testing.T) {
			// We need to initialize this for makeTableDescForTest.
			tc.table.indexKeyArgs.primaryValues = tc.table.values
			// Setup descriptors and form an index key.
			desc, _ := makeTableDescForTest(tc.table.indexKeyArgs)
			equivSigs, err := TableEquivSignatures(&desc, &desc.PrimaryIndex)
			if err != nil {
				t.Fatal(err)
			}

			if len(equivSigs) != len(tc.expected) {
				t.Fatalf("expected %d equivalence signatures from TableEquivSignatures, actual %d", len(tc.expected), len(equivSigs))
			}
			for i, sig := range equivSigs {
				if !bytes.Equal(sig, tc.expected[i]) {
					t.Fatalf("equivalence signatures at index %d do not match.\nexpected\t%v\nactual\t%v", i, tc.expected[i], sig)
				}
			}
		})
	}
}

// TestEquivSignature verifies that invoking IndexKeyEquivSignature on an
// encoded index key for a given table-index pair returns the same equivalence
// signature that TableEquivSignatures returns for that table-index.
// It also checks that the signature does not match any other table's
// equivalence signature.
func TestEquivSignature(t *testing.T) {
	for _, tc := range []struct {
		name   string
		tables []interleaveTableArgs
	}{
		{
			name: "Simple",
			tables: []interleaveTableArgs{
				{
					indexKeyArgs: indexKeyTest{tableID: 50},
					values:       []tree.Datum{tree.NewDInt(10)},
				},
				{
					indexKeyArgs: indexKeyTest{tableID: 51},
					values:       []tree.Datum{tree.NewDInt(20)},
				},
			},
		},

		{
			name: "ParentAndChild",
			tables: []interleaveTableArgs{
				{
					indexKeyArgs: indexKeyTest{tableID: 50},
					values:       []tree.Datum{tree.NewDInt(10)},
				},
				{
					indexKeyArgs: indexKeyTest{tableID: 51, primaryInterleaves: []ID{50}},
					values:       []tree.Datum{tree.NewDInt(10), tree.NewDInt(20)},
				},
			},
		},

		{
			name: "Siblings",
			tables: []interleaveTableArgs{
				{
					indexKeyArgs: indexKeyTest{tableID: 50},
					values:       []tree.Datum{tree.NewDInt(10)},
				},
				{
					indexKeyArgs: indexKeyTest{tableID: 51, primaryInterleaves: []ID{50}},
					values:       []tree.Datum{tree.NewDInt(10), tree.NewDInt(20)},
				},
				{
					indexKeyArgs: indexKeyTest{tableID: 52, primaryInterleaves: []ID{50}},
					values:       []tree.Datum{tree.NewDInt(30), tree.NewDInt(40)},
				},
			},
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			keyEquivSigs := make([][]byte, len(tc.tables))
			tableEquivSigs := make([][]byte, len(tc.tables))

			for i, table := range tc.tables {
				// We need to initialize this for makeTableDescForTest.
				table.indexKeyArgs.primaryValues = table.values

				// Setup descriptors and form an index key.
				desc, colMap := makeTableDescForTest(table.indexKeyArgs)
				primaryKeyPrefix := MakeIndexKeyPrefix(keys.SystemSQLCodec, &desc, desc.PrimaryIndex.ID)
				primaryKey, _, err := EncodeIndexKey(
					&desc, &desc.PrimaryIndex, colMap, table.values, primaryKeyPrefix)
				if err != nil {
					t.Fatal(err)
				}

				// Extract out the table's equivalence signature.
				tempEquivSigs, err := TableEquivSignatures(&desc, &desc.PrimaryIndex)
				if err != nil {
					t.Fatal(err)
				}
				// The last signature is this table's.
				tableEquivSigs[i] = tempEquivSigs[len(tempEquivSigs)-1]

				validEquivSigs := make(map[string]int)
				for i, sig := range tempEquivSigs {
					validEquivSigs[string(sig)] = i
				}
				// Extract out the corresponding table index
				// of the index key's signature.
				tableIdx, _, _, err := IndexKeyEquivSignature(primaryKey, validEquivSigs, nil /*keySigBuf*/, nil /*keyRestBuf*/)
				if err != nil {
					t.Fatal(err)
				}
				// Map the table index back to the signature.
				keyEquivSigs[i] = tempEquivSigs[tableIdx]
			}

			for i, keySig := range keyEquivSigs {
				for j, tableSig := range tableEquivSigs {
					if i == j {
						// The corresponding table should have the same
						// equivalence signature as the one derived from the key.
						if !bytes.Equal(keySig, tableSig) {
							t.Fatalf("IndexKeyEquivSignature differs from equivalence signature for its table.\nKeySignature: %v\nTableSignature: %v", keySig, tableSig)
						}
					} else {
						// A different table should not have
						// the same equivalence signature.
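						// (Equivalence signatures encode only ancestor table and index IDs,
						// not the key values themselves.)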
						if bytes.Equal(keySig, tableSig) {
							t.Fatalf("IndexKeyEquivSignature produces equivalent signature for a different table.\nKeySignature: %v\nTableSignature: %v", keySig, tableSig)
						}
					}
				}
			}

		})
	}
}

func TestAdjustStartKeyForInterleave(t *testing.T) {
	defer leaktest.AfterTest(t)()

	s, sqlDB, kvDB := serverutils.StartServer(t, base.TestServerArgs{})
	defer s.Stopper().Stop(context.Background())

	sqlutils.CreateTestInterleavedHierarchy(t, sqlDB)

	// Secondary indexes with DESC direction in the last column.
	r := sqlutils.MakeSQLRunner(sqlDB)
	r.Exec(t, fmt.Sprintf(`CREATE INDEX pid1_desc ON %s.parent1 (pid1 DESC)`, sqlutils.TestDB))
	r.Exec(t, fmt.Sprintf(`CREATE INDEX child_desc ON %s.child1 (pid1, cid1, cid2 DESC) INTERLEAVE IN PARENT %s.parent1 (pid1)`, sqlutils.TestDB, sqlutils.TestDB))
	r.Exec(t, fmt.Sprintf(`CREATE INDEX grandchild_desc ON %s.grandchild1 (pid1, cid1, cid2, gcid1 DESC) INTERLEAVE IN PARENT %s.child1(pid1, cid1, cid2)`, sqlutils.TestDB, sqlutils.TestDB))
	// Index with implicit primary columns (pid1, cid2).
	r.Exec(t, fmt.Sprintf(`CREATE INDEX child_non_unique ON %s.child1 (v, cid1)`, sqlutils.TestDB))
	r.Exec(t, fmt.Sprintf(`CREATE UNIQUE INDEX child_unique ON %s.child1 (v, cid1)`, sqlutils.TestDB))

	// The interleaved hierarchy is as follows:
	//   parent       (pid1)
	//     child      (pid1, cid1, cid2)
	//       grandchild (pid1, cid1, cid2, gcid1)
	parent := GetTableDescriptor(kvDB, keys.SystemSQLCodec, sqlutils.TestDB, "parent1")
	child := GetTableDescriptor(kvDB, keys.SystemSQLCodec, sqlutils.TestDB, "child1")
	grandchild := GetTableDescriptor(kvDB, keys.SystemSQLCodec, sqlutils.TestDB, "grandchild1")

	parentDescIdx := parent.Indexes[0]
	childDescIdx := child.Indexes[0]
	childNonUniqueIdx := child.Indexes[1]
	childUniqueIdx := child.Indexes[2]
	grandchildDescIdx := grandchild.Indexes[0]

	testCases := []struct {
		index *IndexDescriptor
		// See ShortToLongKeyFmt for how to represent a key.
		input    string
		expected string
	}{
		// NOTNULLASC can appear at the end of a start key for
		// constraint IS NOT NULL on an ASC index (NULLs sorted first,
		// span starts (start key) on the first non-NULL).
		// See encodeStartConstraintAscending.

		{
			index:    &parent.PrimaryIndex,
			input:    "/NOTNULLASC",
			expected: "/NOTNULLASC",
		},
		{
			index:    &child.PrimaryIndex,
			input:    "/1/#/2/NOTNULLASC",
			expected: "/1/#/2/NOTNULLASC",
		},
		{
			index:    &grandchild.PrimaryIndex,
			input:    "/1/#/2/3/#/NOTNULLASC",
			expected: "/1/#/2/3/#/NOTNULLASC",
		},

		{
			index:    &child.PrimaryIndex,
			input:    "/1/#/NOTNULLASC",
			expected: "/1/#/NOTNULLASC",
		},

		{
			index:    &grandchild.PrimaryIndex,
			input:    "/1/#/2/NOTNULLASC",
			expected: "/1/#/2/NOTNULLASC",
		},

		// NULLDESC can appear at the end of a start key for constraint
		// IS NULL on a DESC index (NULLs sorted last, span starts
		// (start key) on the first NULLs).
		// See encodeStartConstraintDescending.

		{
			index:    &parentDescIdx,
			input:    "/NULLDESC",
			expected: "/NULLDESC",
		},
		{
			index:    &childDescIdx,
			input:    "/1/#/2/NULLDESC",
			expected: "/1/#/2/NULLDESC",
		},
		{
			index:    &grandchildDescIdx,
			input:    "/1/#/2/3/#/NULLDESC",
			expected: "/1/#/2/3/#/NULLDESC",
		},

		{
			index:    &childDescIdx,
			input:    "/1/#/NULLDESC",
			expected: "/1/#/NULLDESC",
		},

		// Keys that belong to the given index (neither parent nor
		// children keys) do not need to be tightened.
		{
			index:    &parent.PrimaryIndex,
			input:    "/1",
			expected: "/1",
		},
		{
			index:    &child.PrimaryIndex,
			input:    "/1/#/2/3",
			expected: "/1/#/2/3",
		},

		// Parent keys wrt the child index are not tightened.
		{
			index:    &child.PrimaryIndex,
			input:    "/1",
			expected: "/1",
		},

		// Children keys wrt the parent index are tightened (pushed
		// forward) to the next parent key.
		{
			index:    &parent.PrimaryIndex,
			input:    "/1/#/2/3",
			expected: "/2",
		},
		{
			index:    &child.PrimaryIndex,
			input:    "/1/#/2/3/#/4",
			expected: "/1/#/2/4",
		},

		// Key with len > 1 tokens.
		{
			index:    &child.PrimaryIndex,
			input:    "/12345678901234/#/1234/1234567890/#/123/1234567",
			expected: "/12345678901234/#/1234/1234567891",
		},
		{
			index:    &child.PrimaryIndex,
			input:    "/12345678901234/#/d1403.2594/shelloworld/#/123/1234567",
			expected: "/12345678901234/#/d1403.2594/shelloworld/PrefixEnd",
		},

		// Index key with extra columns (implicit primary key columns).
		// We should expect two extra columns (in addition to the
		// two index columns).
		{
			index:    &childNonUniqueIdx,
			input:    "/2/3",
			expected: "/2/3",
		},
		{
			index:    &childNonUniqueIdx,
			input:    "/2/3/4",
			expected: "/2/3/4",
		},
		{
			index:    &childNonUniqueIdx,
			input:    "/2/3/4/5",
			expected: "/2/3/4/5",
		},
		{
			index:    &childNonUniqueIdx,
			input:    "/2/3/4/5/#/10",
			expected: "/2/3/4/6",
		},

		// Unique indexes only include implicit columns if they have
		// a NULL value.
		{
			index:    &childUniqueIdx,
			input:    "/2/3",
			expected: "/2/3",
		},
		{
			index:    &childUniqueIdx,
			input:    "/2/3/4",
			expected: "/2/4",
		},
		{
			index:    &childUniqueIdx,
			input:    "/2/NULLASC/4",
			expected: "/2/NULLASC/4",
		},
		{
			index:    &childUniqueIdx,
			input:    "/2/NULLASC/4/5",
			expected: "/2/NULLASC/4/5",
		},
		{
			index:    &childUniqueIdx,
			input:    "/2/NULLASC/4/5/#/6",
			expected: "/2/NULLASC/4/6",
		},
	}

	for i, tc := range testCases {
		t.Run(strconv.Itoa(i), func(t *testing.T) {
			codec := keys.SystemSQLCodec
			actual := EncodeTestKey(t, kvDB, codec, ShortToLongKeyFmt(tc.input))
			actual, err := AdjustStartKeyForInterleave(codec, tc.index, actual)
			if err != nil {
				t.Fatal(err)
			}

			expected := EncodeTestKey(t, kvDB, codec, ShortToLongKeyFmt(tc.expected))
			if !expected.Equal(actual) {
				t.Errorf("expected tightened start key %s, got %s", expected, actual)
			}
		})
	}
}

func TestAdjustEndKeyForInterleave(t *testing.T) {
	defer leaktest.AfterTest(t)()

	s, sqlDB, kvDB := serverutils.StartServer(t, base.TestServerArgs{})
	defer s.Stopper().Stop(context.Background())

	sqlutils.CreateTestInterleavedHierarchy(t, sqlDB)

	// Secondary indexes with DESC direction in the last column.
	r := sqlutils.MakeSQLRunner(sqlDB)
	r.Exec(t, fmt.Sprintf(`CREATE INDEX pid1_desc ON %s.parent1 (pid1 DESC)`, sqlutils.TestDB))
	r.Exec(t, fmt.Sprintf(`CREATE INDEX child_desc ON %s.child1 (pid1, cid1, cid2 DESC) INTERLEAVE IN PARENT %s.parent1 (pid1)`, sqlutils.TestDB, sqlutils.TestDB))
	r.Exec(t, fmt.Sprintf(`CREATE INDEX grandchild_desc ON %s.grandchild1 (pid1, cid1, cid2, gcid1 DESC) INTERLEAVE IN PARENT %s.child1(pid1, cid1, cid2)`, sqlutils.TestDB, sqlutils.TestDB))
	// Index with implicit primary columns (pid1, cid2).
	r.Exec(t, fmt.Sprintf(`CREATE INDEX child_non_unique ON %s.child1 (v, cid1)`, sqlutils.TestDB))
	r.Exec(t, fmt.Sprintf(`CREATE UNIQUE INDEX child_unique ON %s.child1 (v, cid1)`, sqlutils.TestDB))

	// The interleaved hierarchy is as follows:
	//   parent       (pid1)
	//     child      (pid1, cid1, cid2)
	//       grandchild (pid1, cid1, cid2, gcid1)
	parent := GetTableDescriptor(kvDB, keys.SystemSQLCodec, sqlutils.TestDB, "parent1")
	child := GetTableDescriptor(kvDB, keys.SystemSQLCodec, sqlutils.TestDB, "child1")
	grandchild := GetTableDescriptor(kvDB, keys.SystemSQLCodec, sqlutils.TestDB, "grandchild1")

	parentDescIdx := parent.Indexes[0]
	childDescIdx := child.Indexes[0]
	childNonUniqueIdx := child.Indexes[1]
	childUniqueIdx := child.Indexes[2]
	grandchildDescIdx := grandchild.Indexes[0]

	testCases := []struct {
		table *TableDescriptor
		index *IndexDescriptor
		// See ShortToLongKeyFmt for how to represent a key.
		input string
		// Whether the end key is assumed to be inclusive when passed
		// to AdjustEndKeyForInterleave.
		inclusive bool
		expected  string
	}{
		// NOTNULLASC can appear at the end of an end key for
		// constraint IS NULL on an ASC index (NULLs sorted first,
		// span ends (end key) right before the first non-NULL).
		// See encodeEndConstraintAscending.

		{
			table:    parent,
			index:    &parent.PrimaryIndex,
			input:    "/NOTNULLASC",
			expected: "/NULLASC/#",
		},

		{
			table:    child,
			index:    &child.PrimaryIndex,
			input:    "/1/#/2/NOTNULLASC",
			expected: "/1/#/2/NULLASC/#",
		},

		{
			table:    grandchild,
			index:    &grandchild.PrimaryIndex,
			input:    "/1/#/2/3/#/NOTNULLASC",
			expected: "/1/#/2/3/#/NULLASC/#",
		},

		// No change since interleaved rows cannot occur between
		// partial primary key columns.
		{
			table:    child,
			index:    &child.PrimaryIndex,
			input:    "/1/#/NOTNULLASC",
			expected: "/1/#/NOTNULLASC",
		},

		// No change since key belongs to an ancestor.
		{
			table:    grandchild,
			index:    &grandchild.PrimaryIndex,
			input:    "/1/#/2/NOTNULLASC",
			expected: "/1/#/2/NOTNULLASC",
		},

		// NOTNULLDESC can appear at the end of an end key for
		// constraint IS NOT NULL on a DESC index (NULLs sorted last,
		// span ends (end key) right after the last non-NULL).
		// See encodeEndConstraintDescending.

		// No change since descending indexes are always secondary and
		// secondary indexes are never tightened since they cannot
		// have interleaved rows.

		{
			table:    parent,
			index:    &parentDescIdx,
			input:    "/NOTNULLDESC",
			expected: "/NOTNULLDESC",
		},
		{
			table:    child,
			index:    &childDescIdx,
			input:    "/1/#/2/NOTNULLDESC",
			expected: "/1/#/2/NOTNULLDESC",
		},
		{
			table:    grandchild,
			index:    &grandchildDescIdx,
			input:    "/1/#/2/3/#/NOTNULLDESC",
			expected: "/1/#/2/3/#/NOTNULLDESC",
		},
		{
			table:    grandchild,
			index:    &grandchildDescIdx,
			input:    "/1/#/2/NOTNULLDESC",
			expected: "/1/#/2/NOTNULLDESC",
		},

		// NULLASC with inclusive=true is possible with IS NULL for
		// ascending indexes.
		// See encodeEndConstraintAscending.

		{
			table:     parent,
			index:     &parent.PrimaryIndex,
			input:     "/NULLASC",
			inclusive: true,
			expected:  "/NULLASC/#",
		},

		{
			table:     child,
			index:     &child.PrimaryIndex,
			input:     "/1/#/2/NULLASC",
			inclusive: true,
			expected:  "/1/#/2/NULLASC/#",
		},

		// Keys with all the column values of the primary key should be
		// tightened wrt primary indexes since they can have
		// interleaved rows.

		{
			table:    parent,
			index:    &parent.PrimaryIndex,
			input:    "/1",
			expected: "/0/#",
		},
		{
			table:     parent,
			index:     &parent.PrimaryIndex,
			input:     "/1",
			inclusive: true,
			expected:  "/1/#",
		},

		{
			table:    child,
			index:    &child.PrimaryIndex,
			input:    "/1/#/2/3",
			expected: "/1/#/2/2/#",
		},
		{
			table:     child,
			index:     &child.PrimaryIndex,
			input:     "/1/#/2/3",
			inclusive: true,
			expected:  "/1/#/2/3/#",
		},

		// Idempotency.

		{
			table:    parent,
			index:    &parent.PrimaryIndex,
			input:    "/1/#",
			expected: "/1/#",
		},
		{
			table:    child,
			index:    &child.PrimaryIndex,
			input:    "/1/#",
			expected: "/1/#",
		},
		{
			table:    child,
			index:    &child.PrimaryIndex,
			input:    "/1/#/2/2/#",
			expected: "/1/#/2/2/#",
		},

		// Children end keys wrt a "parent" index should be tightened
		// to read up to the last parent key.

		{
			table:    parent,
			index:    &parent.PrimaryIndex,
			input:    "/1/#/2/3",
			expected: "/1/#",
		},
		{
			table:     parent,
			index:     &parent.PrimaryIndex,
			input:     "/1/#/2/3",
			inclusive: true,
			expected:  "/1/#",
		},

		{
			table:    child,
			index:    &child.PrimaryIndex,
			input:    "/1/#/2/3/#/4",
			expected: "/1/#/2/3/#",
		},
		{
			table:     child,
			index:     &child.PrimaryIndex,
			input:     "/1/#/2/3/#/4",
			inclusive: true,
			expected:  "/1/#/2/3/#",
		},

		// Parent keys wrt child keys need not be tightened.

		{
			table:    child,
			index:    &child.PrimaryIndex,
			input:    "/1",
			expected: "/1",
		},
		{
			table:     child,
			index:     &child.PrimaryIndex,
			input:     "/1",
			inclusive: true,
			expected:  "/2",
		},

		// Keys with a partial prefix of the primary key columns
		// need not be tightened since no interleaving can occur after.

		{
			table:    child,
			index:    &child.PrimaryIndex,
			input:    "/1/#/2",
			expected: "/1/#/2",
		},
		{
			table:     child,
			index:     &child.PrimaryIndex,
			input:     "/1/#/2",
			inclusive: true,
			expected:  "/1/#/3",
		},

		// Secondary indexes' end keys need not be tightened since
		// they cannot have interleaves.

		{
			table:    child,
			index:    &childDescIdx,
			input:    "/1/#/2/3",
			expected: "/1/#/2/3",
		},
		{
			table:     child,
			index:     &childDescIdx,
			input:     "/1/#/2/3",
			inclusive: true,
			expected:  "/1/#/2/4",
		},
		{
			table:    child,
			index:    &childDescIdx,
			input:    "/1/#/2",
			expected: "/1/#/2",
		},
		{
			table:     child,
			index:     &childDescIdx,
			input:     "/1/#/2",
			inclusive: true,
			expected:  "/1/#/3",
		},

		// Key with len > 1 tokens.
		{
			table:    child,
			index:    &child.PrimaryIndex,
			input:    "/12345678901234/#/12345/12345678901234/#/123/1234567",
			expected: "/12345678901234/#/12345/12345678901234/#",
		},

		// Index key with extra columns (implicit primary key columns).
		// We should expect two extra columns (in addition to the
		// two index columns).
		{
			table:    child,
			index:    &childNonUniqueIdx,
			input:    "/2/3",
			expected: "/2/3",
		},
		{
			table:    child,
			index:    &childNonUniqueIdx,
			input:    "/2/3/4",
			expected: "/2/3/4",
		},
		{
			table:    child,
			index:    &childNonUniqueIdx,
			input:    "/2/3/4/5",
			expected: "/2/3/4/5",
		},
		// End key not adjusted since secondary indexes can't have
		// interleaved rows.
		{
			table:    child,
			index:    &childNonUniqueIdx,
			input:    "/2/3/4/5/#/10",
			expected: "/2/3/4/5/#/10",
		},

		{
			table:    child,
			index:    &childUniqueIdx,
			input:    "/2/3",
			expected: "/2/3",
		},
		// End key not adjusted since secondary indexes can't have
		// interleaved rows.
		{
			table:    child,
			index:    &childUniqueIdx,
			input:    "/2/3/4",
			expected: "/2/3/4",
		},
		{
			table:    child,
			index:    &childUniqueIdx,
			input:    "/2/NULLASC/4",
			expected: "/2/NULLASC/4",
		},
		{
			table:    child,
			index:    &childUniqueIdx,
			input:    "/2/NULLASC/4/5",
			expected: "/2/NULLASC/4/5",
		},
		// End key not adjusted since secondary indexes can't have
		// interleaved rows.
		{
			table:    child,
			index:    &childUniqueIdx,
			input:    "/2/NULLASC/4/5/#/6",
			expected: "/2/NULLASC/4/5/#/6",
		},

		// Keys with decimal values.
		// Not tightened since it's difficult to "go back" one logical
		// decimal value.
		{
			table:    child,
			index:    &child.PrimaryIndex,
			input:    "/1/#/2/d3.4567",
			expected: "/1/#/2/d3.4567",
		},
		{
			table:     child,
			index:     &child.PrimaryIndex,
			input:     "/1/#/2/d3.4567",
			inclusive: true,
			expected:  "/1/#/2/d3.4567/#",
		},
		{
			table:    child,
			index:    &child.PrimaryIndex,
			input:    "/1/#/2/d3.4567/#/8",
			expected: "/1/#/2/d3.4567/#",
		},

		// Keys with bytes values.
		// Not tightened since it's difficult to "go back" one logical
		// bytes value.
		{
			table:    child,
			index:    &child.PrimaryIndex,
			input:    "/1/#/2/shelloworld",
			expected: "/1/#/2/shelloworld",
		},
		{
			table:     child,
			index:     &child.PrimaryIndex,
			input:     "/1/#/2/shelloworld",
			inclusive: true,
			expected:  "/1/#/2/shelloworld/#",
		},
		{
			table:    child,
			index:    &child.PrimaryIndex,
			input:    "/1/#/2/shelloworld/#/3",
			expected: "/1/#/2/shelloworld/#",
		},
	}

	for i, tc := range testCases {
		t.Run(strconv.Itoa(i), func(t *testing.T) {
			codec := keys.SystemSQLCodec
			actual := EncodeTestKey(t, kvDB, codec, ShortToLongKeyFmt(tc.input))
			actual, err := AdjustEndKeyForInterleave(codec, tc.table, tc.index, actual, tc.inclusive)
			if err != nil {
				t.Fatal(err)
			}

			expected := EncodeTestKey(t, kvDB, codec, ShortToLongKeyFmt(tc.expected))
			if !expected.Equal(actual) {
				t.Errorf("expected tightened end key %s, got %s", expected, actual)
			}
		})
	}
}

func TestDecodeTableValue(t *testing.T) {
	a := &DatumAlloc{}
	for _, tc := range []struct {
		in  tree.Datum
		typ *types.T
		err string
	}{
		// These test cases are not intended to be exhaustive, but rather exercise
		// the special casing and error handling of DecodeTableValue.
		{tree.DNull, types.Bool, ""},
		{tree.DBoolTrue, types.Bool, ""},
		{tree.NewDInt(tree.DInt(4)), types.Bool, "value type is not True or False: Int"},
		{tree.DNull, types.Int, ""},
		{tree.NewDInt(tree.DInt(4)), types.Int, ""},
		{tree.DBoolTrue, types.Int, "decoding failed"},
	} {
		t.Run("", func(t *testing.T) {
			var prefix, scratch []byte
			buf, err := EncodeTableValue(prefix, 0 /* colID */, tc.in, scratch)
			if err != nil {
				t.Fatal(err)
			}
			d, _, err := DecodeTableValue(a, tc.typ, buf)
			if !testutils.IsError(err, tc.err) {
				t.Fatalf("expected error %q, but got %v", tc.err, err)
			} else if err != nil {
				return
			}
			if tc.in.Compare(tree.NewTestingEvalContext(cluster.MakeTestingClusterSettings()), d) != 0 {
				t.Fatalf("decoded datum %[1]v (%[1]T) does not match encoded datum %[2]v (%[2]T)", d, tc.in)
			}
		})
	}
}