github.com/dolthub/dolt/go@v0.40.5-0.20240520175717-68db7794bea6/libraries/doltcore/sqle/index/dolt_index.go (about) 1 // Copyright 2020-2021 Dolthub, Inc. 2 // 3 // Licensed under the Apache License, Version 2.0 (the "License"); 4 // you may not use this file except in compliance with the License. 5 // You may obtain a copy of the License at 6 // 7 // http://www.apache.org/licenses/LICENSE-2.0 8 // 9 // Unless required by applicable law or agreed to in writing, software 10 // distributed under the License is distributed on an "AS IS" BASIS, 11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 // See the License for the specific language governing permissions and 13 // limitations under the License. 14 15 package index 16 17 import ( 18 "context" 19 "errors" 20 "fmt" 21 "sync/atomic" 22 23 "github.com/dolthub/go-mysql-server/sql" 24 "github.com/dolthub/go-mysql-server/sql/expression" 25 "github.com/dolthub/go-mysql-server/sql/fulltext" 26 sqltypes "github.com/dolthub/go-mysql-server/sql/types" 27 28 "github.com/dolthub/dolt/go/libraries/doltcore/doltdb" 29 "github.com/dolthub/dolt/go/libraries/doltcore/doltdb/durable" 30 "github.com/dolthub/dolt/go/libraries/doltcore/schema" 31 "github.com/dolthub/dolt/go/libraries/doltcore/table/typed/noms" 32 "github.com/dolthub/dolt/go/store/datas" 33 "github.com/dolthub/dolt/go/store/hash" 34 "github.com/dolthub/dolt/go/store/pool" 35 "github.com/dolthub/dolt/go/store/prolly" 36 "github.com/dolthub/dolt/go/store/prolly/tree" 37 "github.com/dolthub/dolt/go/store/types" 38 "github.com/dolthub/dolt/go/store/val" 39 ) 40 41 const ( 42 CommitHashIndexId = "commit_hash" 43 ToCommitIndexId = "to_commit" 44 FromCommitIndexId = "from_commit" 45 ) 46 47 type DoltTableable interface { 48 DoltTable(*sql.Context) (*doltdb.Table, error) 49 DataCacheKey(*sql.Context) (doltdb.DataCacheKey, bool, error) 50 } 51 52 type DoltIndex interface { 53 sql.FilteredIndex 54 sql.OrderedIndex 55 fulltext.Index 56 Schema() 
schema.Schema 57 IndexSchema() schema.Schema 58 Format() *types.NomsBinFormat 59 IsPrimaryKey() bool 60 61 valueReadWriter() types.ValueReadWriter 62 63 getDurableState(*sql.Context, DoltTableable) (*durableIndexState, error) 64 coversColumns(s *durableIndexState, columns []uint64) bool 65 sqlRowConverter(*durableIndexState, []uint64) *KVToSqlRowConverter 66 lookupTags(s *durableIndexState) map[uint64]int 67 } 68 69 func NewCommitIndex(i *doltIndex) *CommitIndex { 70 return &CommitIndex{doltIndex: i} 71 } 72 73 type CommitIndex struct { 74 *doltIndex 75 } 76 77 func (p *CommitIndex) CanSupport(ranges ...sql.Range) bool { 78 var selects []string 79 for _, r := range ranges { 80 if len(r) != 1 { 81 return false 82 } 83 lb, ok := r[0].LowerBound.(sql.Below) 84 if !ok { 85 return false 86 } 87 lk, ok := lb.Key.(string) 88 if !ok { 89 return false 90 } 91 ub, ok := r[0].UpperBound.(sql.Above) 92 if !ok { 93 return false 94 } 95 uk, ok := ub.Key.(string) 96 if uk != lk { 97 return false 98 } 99 selects = append(selects, uk) 100 } 101 return true 102 } 103 104 var _ DoltIndex = (*CommitIndex)(nil) 105 106 func DoltDiffIndexesFromTable(ctx context.Context, db, tbl string, t *doltdb.Table) (indexes []sql.Index, err error) { 107 sch, err := t.GetSchema(ctx) 108 if err != nil { 109 return nil, err 110 } 111 112 // Currently, only support diffs on tables with primary keys, panic? 
113 if schema.IsKeyless(sch) { 114 return nil, nil 115 } 116 117 tableRows, err := t.GetRowData(ctx) 118 if err != nil { 119 return nil, err 120 } 121 keyBld := maybeGetKeyBuilder(tableRows) 122 123 cols := sch.GetPKCols().GetColumns() 124 125 // add to_ prefix 126 toCols := make([]schema.Column, len(cols)) 127 for i, col := range cols { 128 toCols[i] = col 129 toCols[i].Name = "to_" + col.Name 130 } 131 132 // to_ columns 133 toIndex := doltIndex{ 134 id: "PRIMARY", 135 tblName: doltdb.DoltDiffTablePrefix + tbl, 136 dbName: db, 137 columns: toCols, 138 indexSch: sch, 139 tableSch: sch, 140 unique: true, 141 comment: "", 142 vrw: t.ValueReadWriter(), 143 ns: t.NodeStore(), 144 keyBld: keyBld, 145 order: sql.IndexOrderAsc, 146 constrainedToLookupExpression: false, 147 } 148 149 indexes = append(indexes, &toIndex) 150 if types.IsFormat_DOLT(t.Format()) { 151 indexes = append(indexes, NewCommitIndex(&doltIndex{ 152 id: ToCommitIndexId, 153 tblName: doltdb.DoltDiffTablePrefix + tbl, 154 dbName: db, 155 columns: []schema.Column{ 156 schema.NewColumn(ToCommitIndexId, schema.DiffCommitTag, types.StringKind, false), 157 }, 158 indexSch: sch, 159 tableSch: sch, 160 unique: true, 161 comment: "", 162 vrw: t.ValueReadWriter(), 163 ns: t.NodeStore(), 164 order: sql.IndexOrderNone, 165 constrainedToLookupExpression: false, 166 }), 167 NewCommitIndex(&doltIndex{ 168 id: FromCommitIndexId, 169 tblName: doltdb.DoltDiffTablePrefix + tbl, 170 dbName: db, 171 columns: []schema.Column{ 172 schema.NewColumn(FromCommitIndexId, schema.DiffCommitTag, types.StringKind, false), 173 }, 174 indexSch: sch, 175 tableSch: sch, 176 unique: true, 177 comment: "", 178 vrw: t.ValueReadWriter(), 179 ns: t.NodeStore(), 180 order: sql.IndexOrderNone, 181 constrainedToLookupExpression: false, 182 }), 183 ) 184 } 185 return indexes, nil 186 } 187 188 func DoltToFromCommitIndex(tbl string) sql.Index { 189 return &doltIndex{ 190 id: "commits", 191 tblName: doltdb.DoltCommitDiffTablePrefix + tbl, 192 
columns: []schema.Column{ 193 schema.NewColumn(ToCommitIndexId, schema.DiffCommitTag, types.StringKind, false), 194 schema.NewColumn(FromCommitIndexId, schema.DiffCommitTag, types.StringKind, false), 195 }, 196 unique: true, 197 comment: "", 198 order: sql.IndexOrderNone, 199 constrainedToLookupExpression: false, 200 } 201 } 202 203 // MockIndex returns a sql.Index that is not backed by an actual datastore. It's useful for system tables and 204 // system table functions provide indexes but produce their rows at execution time based on the provided `IndexLookup` 205 func MockIndex(dbName, tableName, columnName string, columnType types.NomsKind, unique bool) (index *doltIndex) { 206 return &doltIndex{ 207 id: columnName, 208 tblName: tableName, 209 dbName: dbName, 210 columns: []schema.Column{ 211 schema.NewColumn(columnName, 0, columnType, false), 212 }, 213 indexSch: nil, 214 tableSch: nil, 215 unique: unique, 216 comment: "", 217 vrw: nil, 218 ns: nil, 219 order: sql.IndexOrderNone, 220 constrainedToLookupExpression: false, 221 } 222 } 223 224 func DoltCommitIndexes(dbName, tab string, db *doltdb.DoltDB, unique bool) (indexes []sql.Index, err error) { 225 if !types.IsFormat_DOLT(db.Format()) { 226 return nil, nil 227 } 228 229 return []sql.Index{ 230 NewCommitIndex(MockIndex(dbName, tab, CommitHashIndexId, types.StringKind, unique)), 231 }, nil 232 } 233 234 func DoltIndexesFromTable(ctx context.Context, db, tbl string, t *doltdb.Table) (indexes []sql.Index, err error) { 235 sch, err := t.GetSchema(ctx) 236 if err != nil { 237 return nil, err 238 } 239 240 if !schema.IsKeyless(sch) { 241 idx, err := getPrimaryKeyIndex(ctx, db, tbl, t, sch) 242 if err != nil { 243 return nil, err 244 } 245 indexes = append(indexes, idx) 246 } 247 248 for _, definition := range sch.Indexes().AllIndexes() { 249 idx, err := getSecondaryIndex(ctx, db, tbl, t, sch, definition) 250 if err != nil { 251 return nil, err 252 } 253 indexes = append(indexes, idx) 254 } 255 256 return indexes, 
nil 257 } 258 259 func TableHasIndex(ctx context.Context, db, tbl string, t *doltdb.Table, i sql.Index) (bool, error) { 260 sch, err := t.GetSchema(ctx) 261 if err != nil { 262 return false, err 263 } 264 265 if !schema.IsKeyless(sch) { 266 idx, err := getPrimaryKeyIndex(ctx, db, tbl, t, sch) 267 if err != nil { 268 return false, err 269 } 270 if indexesMatch(idx, i) { 271 return true, nil 272 } 273 } 274 275 for _, definition := range sch.Indexes().AllIndexes() { 276 idx, err := getSecondaryIndex(ctx, db, tbl, t, sch, definition) 277 if err != nil { 278 return false, err 279 } 280 if indexesMatch(idx, i) { 281 return true, nil 282 } 283 } 284 285 return false, nil 286 } 287 288 // indexesMatch returns whether the two index objects should be considered the same index for the purpose of a lookup, 289 // i.e. whether they have the same name and index the same table columns. 290 func indexesMatch(a sql.Index, b sql.Index) bool { 291 dia, dib := a.(*doltIndex), b.(*doltIndex) 292 if dia.isPk != dib.isPk || dia.id != dib.id { 293 return false 294 } 295 296 if len(dia.columns) != len(dib.columns) { 297 return false 298 } 299 for i := range dia.columns { 300 if dia.columns[i].Name != dib.columns[i].Name { 301 return false 302 } 303 } 304 305 return true 306 } 307 308 func DoltHistoryIndexesFromTable(ctx context.Context, db, tbl string, t *doltdb.Table, ddb *doltdb.DoltDB) ([]sql.Index, error) { 309 indexes, err := DoltIndexesFromTable(ctx, db, tbl, t) 310 if err != nil { 311 return nil, err 312 } 313 314 unorderedIndexes := make([]sql.Index, len(indexes)) 315 for i := range indexes { 316 di := indexes[i].(*doltIndex) 317 // History table indexed reads don't come back in order (iterated by commit graph first), and can include rows that 318 // weren't asked for (because the index needed may not exist at all revisions) 319 di.order = sql.IndexOrderNone 320 di.constrainedToLookupExpression = false 321 unorderedIndexes[i] = di 322 } 323 324 cmIdx, err := DoltCommitIndexes(db, 
tbl, ddb, false) 325 if err != nil { 326 return nil, err 327 } 328 unorderedIndexes = append(unorderedIndexes, cmIdx...) 329 330 return unorderedIndexes, nil 331 } 332 333 func getPrimaryKeyIndex(ctx context.Context, db, tbl string, t *doltdb.Table, sch schema.Schema) (sql.Index, error) { 334 tableRows, err := t.GetRowData(ctx) 335 if err != nil { 336 return nil, err 337 } 338 keyBld := maybeGetKeyBuilder(tableRows) 339 340 cols := sch.GetPKCols().GetColumns() 341 342 vrw := t.ValueReadWriter() 343 344 return &doltIndex{ 345 id: "PRIMARY", 346 tblName: tbl, 347 dbName: db, 348 columns: cols, 349 indexSch: sch, 350 tableSch: sch, 351 unique: true, 352 isPk: true, 353 comment: "", 354 vrw: vrw, 355 ns: t.NodeStore(), 356 keyBld: keyBld, 357 order: sql.IndexOrderAsc, 358 constrainedToLookupExpression: true, 359 doltBinFormat: types.IsFormat_DOLT(vrw.Format()), 360 }, nil 361 } 362 363 func getSecondaryIndex(ctx context.Context, db, tbl string, t *doltdb.Table, sch schema.Schema, idx schema.Index) (sql.Index, error) { 364 indexRows, err := t.GetIndexRowData(ctx, idx.Name()) 365 if err != nil { 366 return nil, err 367 } 368 keyBld := maybeGetKeyBuilder(indexRows) 369 370 cols := make([]schema.Column, idx.Count()) 371 for i, tag := range idx.IndexedColumnTags() { 372 cols[i], _ = idx.GetColumn(tag) 373 } 374 vrw := t.ValueReadWriter() 375 376 return &doltIndex{ 377 id: idx.Name(), 378 tblName: tbl, 379 dbName: db, 380 columns: cols, 381 indexSch: idx.Schema(), 382 tableSch: sch, 383 unique: idx.IsUnique(), 384 spatial: idx.IsSpatial(), 385 fulltext: idx.IsFullText(), 386 isPk: false, 387 comment: idx.Comment(), 388 vrw: vrw, 389 ns: t.NodeStore(), 390 keyBld: keyBld, 391 order: sql.IndexOrderAsc, 392 constrainedToLookupExpression: true, 393 doltBinFormat: types.IsFormat_DOLT(vrw.Format()), 394 prefixLengths: idx.PrefixLengths(), 395 fullTextProps: idx.FullTextProperties(), 396 }, nil 397 } 398 399 // ConvertFullTextToSql converts a given Full-Text schema.Index into a 
sql.Index. As we do not need to write to a 400 // Full-Text index, we can omit all such fields. This must not be used in any other circumstance. 401 func ConvertFullTextToSql(ctx context.Context, db, tbl string, sch schema.Schema, idx schema.Index) (sql.Index, error) { 402 cols := make([]schema.Column, idx.Count()) 403 for i, tag := range idx.IndexedColumnTags() { 404 cols[i], _ = idx.GetColumn(tag) 405 } 406 407 return &doltIndex{ 408 id: idx.Name(), 409 tblName: tbl, 410 dbName: db, 411 columns: cols, 412 indexSch: idx.Schema(), 413 tableSch: sch, 414 unique: idx.IsUnique(), 415 spatial: idx.IsSpatial(), 416 fulltext: idx.IsFullText(), 417 isPk: false, 418 comment: idx.Comment(), 419 vrw: nil, 420 ns: nil, 421 keyBld: nil, 422 order: sql.IndexOrderAsc, 423 constrainedToLookupExpression: true, 424 doltBinFormat: true, 425 prefixLengths: idx.PrefixLengths(), 426 fullTextProps: idx.FullTextProperties(), 427 }, nil 428 } 429 430 type durableIndexState struct { 431 key doltdb.DataCacheKey 432 Primary durable.Index 433 Secondary durable.Index 434 coversAllCols uint32 435 cachedLookupTags atomic.Value 436 cachedSqlRowConverter atomic.Value 437 cachedProjections atomic.Value 438 } 439 440 func (s *durableIndexState) coversAllColumns(i *doltIndex) bool { 441 coversI := atomic.LoadUint32(&s.coversAllCols) 442 if coversI != 0 { 443 return coversI == 1 444 } 445 cols := i.Schema().GetAllCols() 446 var idxCols *schema.ColCollection 447 if types.IsFormat_DOLT(i.Format()) { 448 // prolly indexes can cover an index lookup using 449 // both the key and value fields of the index, 450 // this allows using covering index machinery for 451 // primary key index lookups. 452 idxCols = i.IndexSchema().GetAllCols() 453 } else { 454 // to cover an index lookup, noms indexes must 455 // contain all fields in the index's key. 
456 idxCols = i.IndexSchema().GetPKCols() 457 } 458 covers := true 459 for i := 0; i < cols.Size(); i++ { 460 col := cols.GetByIndex(i) 461 if _, ok := idxCols.GetByNameCaseInsensitive(col.Name); !ok { 462 covers = false 463 break 464 } 465 } 466 if covers { 467 atomic.StoreUint32(&s.coversAllCols, 1) 468 } else { 469 atomic.StoreUint32(&s.coversAllCols, 2) 470 } 471 return covers 472 } 473 474 func (s *durableIndexState) lookupTags(i *doltIndex) map[uint64]int { 475 cached := s.cachedLookupTags.Load() 476 if cached == nil { 477 tags := i.Schema().GetPKCols().Tags 478 sz := len(tags) 479 if sz == 0 { 480 sz = 1 481 } 482 tocache := make(map[uint64]int, sz) 483 for i, tag := range tags { 484 tocache[tag] = i 485 } 486 if len(tocache) == 0 { 487 tocache[schema.KeylessRowIdTag] = 0 488 } 489 s.cachedLookupTags.Store(tocache) 490 cached = tocache 491 } 492 return cached.(map[uint64]int) 493 } 494 495 func projectionsEqual(x, y []uint64) bool { 496 if len(x) != len(y) { 497 return false 498 } 499 var i, j int 500 for i < len(x) && j < len(y) { 501 if x[i] != y[j] { 502 return false 503 } 504 i++ 505 j++ 506 } 507 return true 508 } 509 func (s *durableIndexState) sqlRowConverter(i *doltIndex, proj []uint64) *KVToSqlRowConverter { 510 cachedProjections := s.cachedProjections.Load() 511 cachedConverter := s.cachedSqlRowConverter.Load() 512 if cachedConverter == nil || !projectionsEqual(proj, cachedProjections.([]uint64)) { 513 cachedConverter = NewKVToSqlRowConverterForCols(i.Format(), i.Schema(), proj) 514 s.cachedSqlRowConverter.Store(cachedConverter) 515 s.cachedProjections.Store(proj) 516 } 517 return cachedConverter.(*KVToSqlRowConverter) 518 } 519 520 type cachedDurableIndexes struct { 521 val atomic.Value 522 } 523 524 func (i *cachedDurableIndexes) load() *durableIndexState { 525 l := i.val.Load() 526 if l == nil { 527 return nil 528 } 529 return l.(*durableIndexState) 530 } 531 532 func (i *cachedDurableIndexes) store(v *durableIndexState) { 533 i.val.Store(v) 534 
} 535 536 type doltIndex struct { 537 id string 538 tblName string 539 dbName string 540 541 columns []schema.Column 542 543 indexSch schema.Schema 544 tableSch schema.Schema 545 unique bool 546 spatial bool 547 fulltext bool 548 isPk bool 549 comment string 550 order sql.IndexOrder 551 552 constrainedToLookupExpression bool 553 554 vrw types.ValueReadWriter 555 ns tree.NodeStore 556 keyBld *val.TupleBuilder 557 558 cache cachedDurableIndexes 559 doltBinFormat bool 560 561 prefixLengths []uint16 562 fullTextProps schema.FullTextProperties 563 } 564 565 var _ DoltIndex = (*doltIndex)(nil) 566 var _ sql.ExtendedIndex = (*doltIndex)(nil) 567 568 // CanSupport implements sql.Index 569 func (di *doltIndex) CanSupport(...sql.Range) bool { 570 return true 571 } 572 573 // ColumnExpressionTypes implements the interface sql.Index. 574 func (di *doltIndex) ColumnExpressionTypes() []sql.ColumnExpressionType { 575 cets := make([]sql.ColumnExpressionType, len(di.columns)) 576 for i, col := range di.columns { 577 cets[i] = sql.ColumnExpressionType{ 578 Expression: di.tblName + "." + col.Name, 579 Type: col.TypeInfo.ToSqlType(), 580 } 581 } 582 return cets 583 } 584 585 // ExtendedColumnExpressionTypes implements the interface sql.ExtendedIndex. 586 func (di *doltIndex) ExtendedColumnExpressionTypes() []sql.ColumnExpressionType { 587 pkCols := di.indexSch.GetPKCols() 588 cets := make([]sql.ColumnExpressionType, 0, len(pkCols.Tags)) 589 _ = pkCols.Iter(func(tag uint64, col schema.Column) (stop bool, err error) { 590 cets = append(cets, sql.ColumnExpressionType{ 591 Expression: di.tblName + "." 
+ col.Name, 592 Type: col.TypeInfo.ToSqlType(), 593 }) 594 return false, nil 595 }) 596 return cets 597 } 598 599 func (di *doltIndex) getDurableState(ctx *sql.Context, ti DoltTableable) (*durableIndexState, error) { 600 var newkey doltdb.DataCacheKey 601 var cancache bool 602 var err error 603 newkey, cancache, err = ti.DataCacheKey(ctx) 604 if err != nil { 605 return nil, err 606 } 607 608 var ret *durableIndexState 609 if cancache { 610 ret = di.cache.load() 611 if ret != nil && ret.key == newkey { 612 return ret, nil 613 } 614 } 615 616 ret = new(durableIndexState) 617 618 var t *doltdb.Table 619 t, err = ti.DoltTable(ctx) 620 if err != nil { 621 return nil, err 622 } 623 624 var primary, secondary durable.Index 625 626 primary, err = t.GetRowData(ctx) 627 if err != nil { 628 return nil, err 629 } 630 if di.ID() == "PRIMARY" { 631 secondary = primary 632 } else { 633 secondary, err = t.GetIndexRowData(ctx, di.ID()) 634 if err != nil { 635 return nil, err 636 } 637 } 638 639 ret.key = newkey 640 ret.Primary = primary 641 ret.Secondary = secondary 642 643 if cancache { 644 di.cache.store(ret) 645 } 646 647 return ret, nil 648 } 649 650 func (di *doltIndex) prollyRanges(ctx *sql.Context, ns tree.NodeStore, ranges ...sql.Range) ([]prolly.Range, error) { 651 //todo(max): it is important that *doltIndexLookup maintains a reference 652 // to empty sqlRanges, otherwise the analyzer will dismiss the index and 653 // chose a less optimal lookup index. This is a GMS concern, so GMS should 654 // really not rely on the integrator to maintain this tenuous relationship. 
655 var err error 656 if !di.spatial { 657 ranges, err = pruneEmptyRanges(ranges) 658 if err != nil { 659 return nil, err 660 } 661 } 662 pranges, err := di.prollyRangesFromSqlRanges(ctx, ns, ranges, di.keyBld) 663 if err != nil { 664 return nil, err 665 } 666 return pranges, nil 667 } 668 669 func (di *doltIndex) nomsRanges(ctx *sql.Context, iranges ...sql.Range) ([]*noms.ReadRange, error) { 670 // This might remain nil if the given nomsRanges each contain an EmptyRange for one of the columns. This will just 671 // cause the lookup to return no rows, which is the desired behavior. 672 var readRanges []*noms.ReadRange 673 674 ranges := make([]sql.Range, len(iranges)) 675 676 for i := range iranges { 677 ranges[i] = DropTrailingAllColumnExprs(iranges[i]) 678 } 679 680 ranges, err := SplitNullsFromRanges(ranges) 681 if err != nil { 682 return nil, err 683 } 684 685 RangeLoop: 686 for _, rang := range ranges { 687 if len(rang) > len(di.columns) { 688 return nil, nil 689 } 690 691 var lowerKeys []interface{} 692 for _, rangeColumnExpr := range rang { 693 if rangeColumnExpr.HasLowerBound() { 694 lowerKeys = append(lowerKeys, sql.GetRangeCutKey(rangeColumnExpr.LowerBound)) 695 } else { 696 break 697 } 698 } 699 lowerboundTuple, err := di.keysToTuple(ctx, lowerKeys) 700 if err != nil { 701 return nil, err 702 } 703 704 rangeCheck := make(nomsRangeCheck, len(rang)) 705 for i, rangeColumnExpr := range rang { 706 // An empty column expression will mean that no values for this column can be matched, so we can discard the 707 // entire range. 
708 if ok, err := rangeColumnExpr.IsEmpty(); err != nil { 709 return nil, err 710 } else if ok { 711 continue RangeLoop 712 } 713 714 cb := columnBounds{} 715 // We promote each type as the value has already been validated against the type 716 promotedType := di.columns[i].TypeInfo.Promote() 717 if rangeColumnExpr.HasLowerBound() { 718 key := sql.GetRangeCutKey(rangeColumnExpr.LowerBound) 719 val, err := promotedType.ConvertValueToNomsValue(ctx, di.vrw, key) 720 if err != nil { 721 return nil, err 722 } 723 if rangeColumnExpr.LowerBound.TypeAsLowerBound() == sql.Closed { 724 // For each lowerbound case, we set the upperbound to infinity, as the upperbound can increment to 725 // get to the desired overall case while retaining whatever was set for the lowerbound. 726 cb.boundsCase = boundsCase_greaterEquals_infinity 727 } else { 728 cb.boundsCase = boundsCase_greater_infinity 729 } 730 cb.lowerbound = val 731 } else { 732 cb.boundsCase = boundsCase_infinity_infinity 733 } 734 if rangeColumnExpr.HasUpperBound() { 735 key := sql.GetRangeCutKey(rangeColumnExpr.UpperBound) 736 val, err := promotedType.ConvertValueToNomsValue(ctx, di.vrw, key) 737 if err != nil { 738 return nil, err 739 } 740 if rangeColumnExpr.UpperBound.TypeAsUpperBound() == sql.Closed { 741 // Bounds cases are enum aliases on bytes, and they're arranged such that we can increment the case 742 // that was previously set when evaluating the lowerbound to get the proper overall case. 743 cb.boundsCase += 1 744 } else { 745 cb.boundsCase += 2 746 } 747 cb.upperbound = val 748 } 749 if rangeColumnExpr.Type() == sql.RangeType_EqualNull { 750 cb.boundsCase = boundsCase_isNull 751 } 752 rangeCheck[i] = cb 753 } 754 755 // If the suffix checks will always succeed (both bounds are infinity) then they can be removed to reduce the 756 // number of checks that are called per-row. Always leave one check to skip NULLs. 
757 for i := len(rangeCheck) - 1; i > 0 && len(rangeCheck) > 1; i-- { 758 if rangeCheck[i].boundsCase == boundsCase_infinity_infinity { 759 rangeCheck = rangeCheck[:i] 760 } else { 761 break 762 } 763 } 764 765 readRanges = append(readRanges, &noms.ReadRange{ 766 Start: lowerboundTuple, 767 Inclusive: true, // The checks handle whether a value is included or not 768 Reverse: false, 769 Check: rangeCheck, 770 }) 771 } 772 773 return readRanges, nil 774 } 775 776 func (di *doltIndex) sqlRowConverter(s *durableIndexState, columns []uint64) *KVToSqlRowConverter { 777 return s.sqlRowConverter(di, columns) 778 } 779 780 func (di *doltIndex) lookupTags(s *durableIndexState) map[uint64]int { 781 return s.lookupTags(di) 782 } 783 784 func (di *doltIndex) coversColumns(s *durableIndexState, cols []uint64) bool { 785 if cols == nil { 786 return s.coversAllColumns(di) 787 } 788 789 if len(di.prefixLengths) > 0 { 790 return false 791 } 792 793 if di.IsSpatial() { 794 return false 795 } 796 797 var idxCols *schema.ColCollection 798 if types.IsFormat_DOLT(di.Format()) { 799 // prolly indexes can cover an index lookup using 800 // both the key and value fields of the index, 801 // this allows using covering index machinery for 802 // primary key index lookups. 803 idxCols = di.IndexSchema().GetAllCols() 804 } else { 805 // to cover an index lookup, noms indexes must 806 // contain all fields in the index's key. 
807 idxCols = di.IndexSchema().GetPKCols() 808 } 809 810 if len(cols) > len(idxCols.Tags) { 811 return false 812 } 813 814 covers := true 815 for _, colTag := range cols { 816 if _, ok := idxCols.TagToIdx[colTag]; !ok { 817 covers = false 818 break 819 } 820 } 821 822 return covers 823 } 824 825 func (di *doltIndex) HandledFilters(filters []sql.Expression) []sql.Expression { 826 if !di.constrainedToLookupExpression { 827 return nil 828 } 829 830 // filters on indexes with prefix lengths are not completely handled 831 if len(di.prefixLengths) > 0 { 832 return nil 833 } 834 835 var handled []sql.Expression 836 for _, f := range filters { 837 if !expression.PreciseComparison(f) { 838 continue 839 } 840 handled = append(handled, f) 841 } 842 return handled 843 } 844 845 func (di *doltIndex) isMockIndex() bool { 846 return di.indexSch == nil 847 } 848 849 // HasContentHashedField returns true if any of the fields in this index are "content-hashed", meaning that the index 850 // stores a hash of the content, instead of the content itself. This is currently limited to unique indexes, which can 851 // use this property to store hashes of TEXT or BLOB fields and still efficiently detect uniqueness. 
852 func (di *doltIndex) HasContentHashedField() bool { 853 // content-hashed fields can currently only be used in unique indexes 854 if !di.IsUnique() { 855 return false 856 } 857 858 contentHashedField := false 859 if di.isMockIndex() { 860 return false 861 } 862 indexPkCols := di.indexSch.GetPKCols() 863 indexPkCols.Iter(func(tag uint64, col schema.Column) (stop bool, err error) { 864 i := indexPkCols.TagToIdx[tag] 865 prefixLength := uint16(0) 866 if len(di.prefixLengths) > i { 867 prefixLength = di.prefixLengths[i] 868 } 869 870 if sqltypes.IsTextBlob(col.TypeInfo.ToSqlType()) && prefixLength == 0 { 871 contentHashedField = true 872 return true, nil 873 } 874 875 return false, nil 876 }) 877 878 return contentHashedField 879 } 880 881 func (di *doltIndex) Order() sql.IndexOrder { 882 if di.HasContentHashedField() { 883 return sql.IndexOrderNone 884 } 885 886 return di.order 887 } 888 889 func (di *doltIndex) Reversible() bool { 890 if di.HasContentHashedField() { 891 return false 892 } 893 894 return di.doltBinFormat 895 } 896 897 // Database implement sql.Index 898 func (di *doltIndex) Database() string { 899 return di.dbName 900 } 901 902 // Expressions implements sql.Index 903 func (di *doltIndex) Expressions() []string { 904 strs := make([]string, len(di.columns)) 905 for i, col := range di.columns { 906 strs[i] = di.tblName + "." 
+ col.Name 907 } 908 return strs 909 } 910 911 // ExtendedExpressions implements sql.ExtendedIndex 912 func (di *doltIndex) ExtendedExpressions() []string { 913 pkCols := di.indexSch.GetPKCols() 914 strs := make([]string, 0, len(pkCols.Tags)) 915 _ = pkCols.Iter(func(tag uint64, col schema.Column) (stop bool, err error) { 916 strs = append(strs, di.tblName+"."+col.Name) 917 return false, nil 918 }) 919 return strs 920 } 921 922 // ID implements sql.Index 923 func (di *doltIndex) ID() string { 924 return di.id 925 } 926 927 // IsUnique implements sql.Index 928 func (di *doltIndex) IsUnique() bool { 929 return di.unique 930 } 931 932 // IsSpatial implements sql.Index 933 func (di *doltIndex) IsSpatial() bool { 934 return di.spatial 935 } 936 937 // IsFullText implements sql.Index 938 func (di *doltIndex) IsFullText() bool { 939 return di.fulltext 940 } 941 942 // IsPrimaryKey implements DoltIndex. 943 func (di *doltIndex) IsPrimaryKey() bool { 944 return di.isPk 945 } 946 947 // Comment implements sql.Index 948 func (di *doltIndex) Comment() string { 949 return di.comment 950 } 951 952 // PrefixLengths implements sql.Index 953 func (di *doltIndex) PrefixLengths() []uint16 { 954 return di.prefixLengths 955 } 956 957 // IndexType implements sql.Index 958 func (di *doltIndex) IndexType() string { 959 return "BTREE" 960 } 961 962 // IsGenerated implements sql.Index 963 func (di *doltIndex) IsGenerated() bool { 964 return false 965 } 966 967 // Schema returns the dolt Table schema of this index. 968 func (di *doltIndex) Schema() schema.Schema { 969 return di.tableSch 970 } 971 972 // IndexSchema returns the dolt index schema. 
973 func (di *doltIndex) IndexSchema() schema.Schema { 974 return di.indexSch 975 } 976 977 // Table implements sql.Index 978 func (di *doltIndex) Table() string { 979 return di.tblName 980 } 981 982 func (di *doltIndex) Format() *types.NomsBinFormat { 983 return di.vrw.Format() 984 } 985 986 // FullTextTableNames implements sql.Index 987 func (di *doltIndex) FullTextTableNames(ctx *sql.Context) (fulltext.IndexTableNames, error) { 988 return fulltext.IndexTableNames{ 989 Config: di.fullTextProps.ConfigTable, 990 Position: di.fullTextProps.PositionTable, 991 DocCount: di.fullTextProps.DocCountTable, 992 GlobalCount: di.fullTextProps.GlobalCountTable, 993 RowCount: di.fullTextProps.RowCountTable, 994 }, nil 995 } 996 997 // FullTextKeyColumns implements sql.Index 998 func (di *doltIndex) FullTextKeyColumns(ctx *sql.Context) (fulltext.KeyColumns, error) { 999 var positions []int 1000 if len(di.fullTextProps.KeyPositions) > 0 { 1001 positions = make([]int, len(di.fullTextProps.KeyPositions)) 1002 for i := range positions { 1003 positions[i] = int(di.fullTextProps.KeyPositions[i]) 1004 } 1005 } 1006 1007 return fulltext.KeyColumns{ 1008 Type: fulltext.KeyType(di.fullTextProps.KeyType), 1009 Name: di.fullTextProps.KeyName, 1010 Positions: positions, 1011 }, nil 1012 } 1013 1014 // keysToTuple returns a tuple that indicates the starting point for an index. The empty tuple will cause the index to 1015 // start at the very beginning. 
1016 func (di *doltIndex) keysToTuple(ctx *sql.Context, keys []interface{}) (types.Tuple, error) { 1017 nbf := di.vrw.Format() 1018 if len(keys) > len(di.columns) { 1019 return types.EmptyTuple(nbf), errors.New("too many keys for the column count") 1020 } 1021 1022 vals := make([]types.Value, len(keys)*2) 1023 for i := range keys { 1024 col := di.columns[i] 1025 // As an example, if our TypeInfo is Int8, we should not fail to create a tuple if we are returning all keys 1026 // that have a value of less than 9001, thus we promote the TypeInfo to the widest type. 1027 val, err := col.TypeInfo.Promote().ConvertValueToNomsValue(ctx, di.vrw, keys[i]) 1028 if err != nil { 1029 return types.EmptyTuple(nbf), err 1030 } 1031 vals[2*i] = types.Uint(col.Tag) 1032 vals[2*i+1] = val 1033 } 1034 return types.NewTuple(nbf, vals...) 1035 } 1036 1037 var sharePool = pool.NewBuffPool() 1038 1039 func maybeGetKeyBuilder(idx durable.Index) *val.TupleBuilder { 1040 if types.IsFormat_DOLT(idx.Format()) { 1041 kd, _ := durable.ProllyMapFromIndex(idx).Descriptors() 1042 return val.NewTupleBuilder(kd) 1043 } 1044 return nil 1045 } 1046 1047 func pruneEmptyRanges(sqlRanges []sql.Range) (pruned []sql.Range, err error) { 1048 pruned = make([]sql.Range, 0, len(sqlRanges)) 1049 for _, sr := range sqlRanges { 1050 empty := false 1051 for _, colExpr := range sr { 1052 empty, err = colExpr.IsEmpty() 1053 if err != nil { 1054 return nil, err 1055 } else if empty { 1056 // one of the RangeColumnExprs in |sr| 1057 // is empty: prune the entire range 1058 break 1059 } 1060 } 1061 for _, ce := range sr { 1062 if lb, ok := ce.LowerBound.(sql.Below); ok && lb.Key == nil { 1063 empty = true 1064 break 1065 } 1066 } 1067 if !empty { 1068 pruned = append(pruned, sr) 1069 } 1070 } 1071 return pruned, nil 1072 } 1073 1074 // trimRangeCutValue will trim the key value retrieved, depending on its type and prefix length 1075 // TODO: this is just the trimKeyPart in the SecondaryIndexWriters, maybe find a 
// trimRangeCutValue truncates a string or []byte key-part value down to the
// index's configured prefix length for key-part position |to|. A prefix
// length of 0, or a position past the configured prefix lengths, means no
// truncation; values shorter than the prefix are returned unchanged.
func (di *doltIndex) trimRangeCutValue(to int, keyPart interface{}) interface{} {
	var prefixLength uint16
	if len(di.prefixLengths) > to {
		prefixLength = di.prefixLengths[to]
	}
	if prefixLength != 0 {
		switch kp := keyPart.(type) {
		case string:
			// Clamp so the slice below never reads past the end of the value.
			if prefixLength > uint16(len(kp)) {
				prefixLength = uint16(len(kp))
			}
			keyPart = kp[:prefixLength]
		case []uint8:
			if prefixLength > uint16(len(kp)) {
				prefixLength = uint16(len(kp))
			}
			keyPart = kp[:prefixLength]
		}
	}
	return keyPart
}

// valueReadWriter returns the types.ValueReadWriter backing this index.
func (di *doltIndex) valueReadWriter() types.ValueReadWriter {
	return di.vrw
}

// prollySpatialRanges converts a spatial bounding-box range (point lower and
// upper bounds) into a set of prolly.Ranges over z-order cell values, one
// batch per mask level 0-64. Returns an error if either bound is not a Point.
func (di *doltIndex) prollySpatialRanges(ranges []sql.Range) ([]prolly.Range, error) {
	// should be exactly one range
	rng := ranges[0][0]
	lower, upper := sql.GetRangeCutKey(rng.LowerBound), sql.GetRangeCutKey(rng.UpperBound)

	minPoint, ok := lower.(sqltypes.Point)
	if !ok {
		return nil, fmt.Errorf("spatial index bounding box using non-point type")
	}
	maxPoint, ok := upper.(sqltypes.Point)
	if !ok {
		return nil, fmt.Errorf("spatial index bounding box using non-point type")
	}

	var pRanges []prolly.Range
	zMin := tree.ZValue(minPoint)
	zMax := tree.ZValue(maxPoint)
	zRanges := tree.SplitZRanges(tree.ZRange{zMin, zMax})
	// Emit ranges for every mask level (0 through 64 inclusive).
	for level := byte(0); level < 65; level++ {
		// For example, at highest level, we'll just look at origin point multiple times
		var prevMinCell, prevMaxCell val.Cell
		for i, zRange := range zRanges {
			minCell := tree.ZMask(level, zRange[0])
			maxCell := tree.ZMask(level, zRange[1])
			// Coarser masking can collapse adjacent z-ranges to the same
			// cell pair; skip consecutive duplicates.
			if i != 0 && minCell == prevMinCell && maxCell == prevMaxCell {
				continue
			}
			prevMinCell = minCell
			prevMaxCell = maxCell
			field := prolly.RangeField{
				Exact: false,
				Lo: prolly.Bound{
					Binding:   true,
					Inclusive: true,
					Value:     minCell[:],
				},
				Hi: prolly.Bound{
					Binding:   true,
					Inclusive: true,
					Value:     maxCell[:],
				},
			}
			pRange := prolly.Range{
				Fields: []prolly.RangeField{field},
				Desc:   di.keyBld.Desc,
			}
			pRanges = append(pRanges, pRange)
		}
	}

	return pRanges, nil
}

// prollyRangesFromSqlRanges converts engine-level sql.Ranges into storage-level
// prolly.Ranges. Spatial indexes are delegated to prollySpatialRanges; for all
// other indexes, empty ranges are pruned and then each range's lower and upper
// bounds are encoded into key tuples via |tb|.
func (di *doltIndex) prollyRangesFromSqlRanges(ctx context.Context, ns tree.NodeStore, ranges []sql.Range, tb *val.TupleBuilder) ([]prolly.Range, error) {
	var err error
	if !di.spatial {
		ranges, err = pruneEmptyRanges(ranges)
		if err != nil {
			return nil, err
		}
	}

	if di.spatial {
		return di.prollySpatialRanges(ranges)
	}

	pranges := make([]prolly.Range, len(ranges))
	for k, rng := range ranges {
		fields := make([]prolly.RangeField, len(rng))
		// First pass: encode all lower-bound values into |tb|, then snapshot
		// them out of the built tuple.
		for j, expr := range rng {
			if rangeCutIsBinding(expr.LowerBound) {
				// accumulate bound values in |tb|
				v, err := getRangeCutValue(expr.LowerBound, rng[j].Typ)
				if err != nil {
					return nil, err
				}
				nv := di.trimRangeCutValue(j, v)
				if err = tree.PutField(ctx, ns, tb, j, nv); err != nil {
					return nil, err
				}
				bound := expr.LowerBound.TypeAsLowerBound()
				fields[j].Lo = prolly.Bound{
					Binding:   true,
					Inclusive: bound == sql.Closed,
				}
			} else {
				fields[j].Lo = prolly.Bound{}
			}
		}
		// BuildPermissive() allows nulls in non-null fields
		tup := tb.BuildPermissive(sharePool)
		for i := range fields {
			fields[i].Lo.Value = tup.GetField(i)
		}

		// Second pass: same for upper bounds.
		for i, expr := range rng {
			if rangeCutIsBinding(expr.UpperBound) {
				bound := expr.UpperBound.TypeAsUpperBound()
				// accumulate bound values in |tb|
				v, err := getRangeCutValue(expr.UpperBound, rng[i].Typ)
				if err != nil {
					return nil, err
				}
				nv := di.trimRangeCutValue(i, v)
				if err = tree.PutField(ctx, ns, tb, i, nv); err != nil {
					return nil, err
				}
				// Normalize []byte to string so the |nv != v| comparison
				// below is well-defined (interfaces holding []byte are not
				// comparable and would panic at runtime).
				if vv, ok := v.([]byte); ok {
					v = string(vv)
				}
				if nvv, ok := nv.([]byte); ok {
					nv = string(nvv)
				}

				fields[i].Hi = prolly.Bound{
					Binding: true,
					// If prefix-trimming changed the value (nv != v), the
					// trimmed bound must be inclusive to avoid dropping rows
					// that share the trimmed prefix.
					Inclusive: bound == sql.Closed || nv != v,
				}
			} else {
				fields[i].Hi = prolly.Bound{}
			}
		}

		tup = tb.BuildPermissive(sharePool)
		for i := range fields {
			fields[i].Hi.Value = tup.GetField(i)
		}

		// Mark fields as exact (point) when both bounds bind to equal values.
		order := di.keyBld.Desc.Comparator()
		for i, field := range fields {
			// lookups on non-unique indexes can't be point lookups
			if !di.unique {
				fields[i].Exact = false
				continue
			}
			if !field.Hi.Binding || !field.Lo.Binding {
				fields[i].Exact = false
				continue
			}
			typ := di.keyBld.Desc.Types[i]
			cmp := order.CompareValues(i, field.Hi.Value, field.Lo.Value, typ)
			fields[i].Exact = cmp == 0
		}
		pranges[k] = prolly.Range{
			Fields: fields,
			Desc:   di.keyBld.Desc,
			Tup:    tup,
		}
	}
	return pranges, nil
}

// rangeCutIsBinding reports whether the cut constrains the scan: Below/Above/
// AboveNull bind a bound value, while BelowNull/AboveAll leave the side of the
// range open. Panics on an unknown cut type (programmer error).
func rangeCutIsBinding(c sql.RangeCut) bool {
	switch c.(type) {
	case sql.Below, sql.Above, sql.AboveNull:
		return true
	case sql.BelowNull, sql.AboveAll:
		return false
	default:
		panic(fmt.Errorf("unknown range cut %v", c))
	}
}

// getRangeCutValue returns the cut's key converted to |typ|. AboveNull has no
// key and yields nil. An OutOfRange conversion result is returned without
// error — presumably Convert clamps the value in that case; TODO confirm
// against the sql.Type.Convert contract.
func getRangeCutValue(cut sql.RangeCut, typ sql.Type) (interface{}, error) {
	if _, ok := cut.(sql.AboveNull); ok {
		return nil, nil
	}
	ret, oob, err := typ.Convert(sql.GetRangeCutKey(cut))
	if oob == sql.OutOfRange {
		return ret, nil
	}
	return ret, err
}

// DropTrailingAllColumnExprs returns the Range with any |AllColumnExprs| at the end of it removed.
//
// Sometimes when we construct read ranges against laid out index structures,
// we want to ignore these trailing clauses.
1280 func DropTrailingAllColumnExprs(r sql.Range) sql.Range { 1281 i := len(r) 1282 for i > 0 { 1283 if r[i-1].Type() != sql.RangeType_All { 1284 break 1285 } 1286 i-- 1287 } 1288 return r[:i] 1289 } 1290 1291 // SplitNullsFromRange given a sql.Range, splits it up into multiple ranges, where each column expr 1292 // that could be NULL and non-NULL is replaced with two column expressions, one 1293 // matching only NULL, and one matching the non-NULL component. 1294 // 1295 // This is for building physical scans against storage which does not store 1296 // NULL contiguous and ordered < non-NULL values. 1297 func SplitNullsFromRange(r sql.Range) ([]sql.Range, error) { 1298 res := []sql.Range{{}} 1299 1300 for _, rce := range r { 1301 if _, ok := rce.LowerBound.(sql.BelowNull); ok { 1302 // May include NULL. Split it and add each non-empty range. 1303 withnull, nullok, err := rce.TryIntersect(sql.NullRangeColumnExpr(rce.Typ)) 1304 if err != nil { 1305 return nil, err 1306 } 1307 fornull := res[:] 1308 withoutnull, withoutnullok, err := rce.TryIntersect(sql.NotNullRangeColumnExpr(rce.Typ)) 1309 if err != nil { 1310 return nil, err 1311 } 1312 forwithoutnull := res[:] 1313 if withoutnullok && nullok { 1314 n := len(res) 1315 res = append(res, res...) 1316 fornull = res[:n] 1317 forwithoutnull = res[n:] 1318 } 1319 if nullok { 1320 for j := range fornull { 1321 fornull[j] = append(fornull[j], withnull) 1322 } 1323 } 1324 if withoutnullok { 1325 for j := range forwithoutnull { 1326 forwithoutnull[j] = append(forwithoutnull[j], withoutnull) 1327 } 1328 } 1329 } else { 1330 for j := range res { 1331 res[j] = append(res[j], rce) 1332 } 1333 } 1334 } 1335 1336 return res, nil 1337 } 1338 1339 // SplitNullsFromRanges splits nulls from ranges. 
1340 func SplitNullsFromRanges(rs []sql.Range) ([]sql.Range, error) { 1341 var ret []sql.Range 1342 for _, r := range rs { 1343 nr, err := SplitNullsFromRange(r) 1344 if err != nil { 1345 return nil, err 1346 } 1347 ret = append(ret, nr...) 1348 } 1349 return ret, nil 1350 } 1351 1352 // LookupToPointSelectStr converts a set of point lookups on string 1353 // fields, returning a nil list and false if any expression failed 1354 // to convert. 1355 func LookupToPointSelectStr(lookup sql.IndexLookup) ([]string, bool) { 1356 var selects []string 1357 for _, r := range lookup.Ranges { 1358 if len(r) != 1 { 1359 return nil, false 1360 } 1361 lb, ok := r[0].LowerBound.(sql.Below) 1362 if !ok { 1363 return nil, false 1364 } 1365 if lb.Key == nil { 1366 continue 1367 } 1368 lk, ok := lb.Key.(string) 1369 if !ok { 1370 return nil, false 1371 } 1372 ub, ok := r[0].UpperBound.(sql.Above) 1373 if !ok { 1374 return nil, false 1375 } 1376 if ub.Key == nil { 1377 continue 1378 } 1379 uk, ok := ub.Key.(string) 1380 if uk != lk { 1381 return nil, false 1382 } 1383 selects = append(selects, uk) 1384 } 1385 return selects, true 1386 } 1387 1388 // HashesToCommits converts a set of strings into hashes, commits, 1389 // and commit metadata. Strings that are invalid hashes, or do 1390 // not refer to commits are filtered from the return lists. 1391 // 1392 // The doltdb.Working edge case is handled specially depending on 1393 // whether we are: 1) interested in converting "WORKING" into a 1394 // commit hash (or leave it as "WORKING"), and 2) whether we want 1395 // to attempt to load a commit if WORKING == HEAD. The commit and 1396 // metadata for a working hash will be nil if indicated. 
func HashesToCommits(
	ctx *sql.Context,
	ddb *doltdb.DoltDB,
	hashStrs []string,
	head *doltdb.Commit,
	convertWorkingToCommit bool,
) ([]hash.Hash, []*doltdb.Commit, []*datas.CommitMeta) {
	// The three result slices are appended in lockstep: entry i of each
	// describes the same input string. Invalid inputs are skipped entirely.
	var hashes []hash.Hash
	var commits []*doltdb.Commit
	var metas []*datas.CommitMeta
	var err error
	var ok bool
	for _, hs := range hashStrs {
		var h hash.Hash
		var cm *doltdb.Commit
		var meta *datas.CommitMeta
		switch hs {
		case doltdb.Working:
			// "WORKING" resolves to the head commit's hash; skip it when
			// there is no head to resolve against.
			if head == nil {
				continue
			}
			h, err = head.HashOf()
			if err != nil {
				continue
			}

			if convertWorkingToCommit {
				cm, err = doltdb.HashToCommit(ctx, ddb.ValueReadWriter(), ddb.NodeStore(), h)
				if err != nil {
					// NOTE(review): |err| remains non-nil here, so the check
					// after the switch skips this entry entirely instead of
					// emitting the hash with a nil commit/meta as the doc
					// comment suggests — confirm which behavior is intended.
					cm = nil
				} else {
					// NOTE(review): the commit loaded above is discarded in
					// favor of |head|; HashToCommit appears to act only as an
					// existence check here — confirm.
					cm = head
					meta, err = cm.GetCommitMeta(ctx)
					if err != nil {
						continue
					}
				}
			}
		default:
			// Non-"WORKING" strings must parse as valid hashes that resolve
			// to commits with readable metadata; anything else is filtered.
			h, ok = hash.MaybeParse(hs)
			if !ok {
				continue
			}
			cm, err = doltdb.HashToCommit(ctx, ddb.ValueReadWriter(), ddb.NodeStore(), h)
			if err != nil {
				continue
			}
			meta, err = cm.GetCommitMeta(ctx)
			if err != nil {
				continue
			}
		}
		// Skip entries whose most recent operation failed.
		if err != nil {
			continue
		}
		hashes = append(hashes, h)
		commits = append(commits, cm)
		metas = append(metas, meta)
	}
	return hashes, commits, metas
}