github.com/matrixorigin/matrixone@v1.2.0/pkg/vm/engine/tae/txn/txnimpl/table.go

// Copyright 2021 Matrix Origin
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package txnimpl

import (
	"bytes"
	"context"
	"fmt"
	"runtime/trace"
	"time"

	"github.com/matrixorigin/matrixone/pkg/fileservice"
	"github.com/matrixorigin/matrixone/pkg/util"
	"go.uber.org/zap"

	"github.com/matrixorigin/matrixone/pkg/perfcounter"

	"github.com/RoaringBitmap/roaring"

	"github.com/matrixorigin/matrixone/pkg/common/moerr"
	"github.com/matrixorigin/matrixone/pkg/common/moprobe"
	"github.com/matrixorigin/matrixone/pkg/container/types"
	"github.com/matrixorigin/matrixone/pkg/logutil"
	"github.com/matrixorigin/matrixone/pkg/objectio"
	apipb "github.com/matrixorigin/matrixone/pkg/pb/api"
	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/blockio"
	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/catalog"
	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/common"
	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/containers"
	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/iface/handle"
	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/iface/txnif"
	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/index"
	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/model"
	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/tables/updates"
	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/wal"
)

var (
	ErrDuplicateNode = moerr.NewInternalErrorNoCtx("tae: duplicate node")
)

type txnEntries struct {
	entries []txnif.TxnEntry
	mask    *roaring.Bitmap
}

func newTxnEntries() *txnEntries {
	return &txnEntries{
		entries: make([]txnif.TxnEntry, 0),
		mask:    roaring.New(),
	}
}

func (entries *txnEntries) Len() int {
	return len(entries.entries)
}

func (entries *txnEntries) Append(entry txnif.TxnEntry) {
	entries.entries = append(entries.entries, entry)
}

func (entries *txnEntries) Delete(idx int) {
	entries.mask.Add(uint32(idx))
}

func (entries *txnEntries) IsDeleted(idx int) bool {
	return entries.mask.ContainsInt(idx)
}

func (entries *txnEntries) AnyDelete() bool {
	return !entries.mask.IsEmpty()
}

func (entries *txnEntries) Close() {
	entries.mask = nil
	entries.entries = nil
}

type deleteNode struct {
	DeleteNodes []txnif.DeleteNode
	idx         []int
}

func newDeleteNode(node txnif.DeleteNode, idx int) *deleteNode {
	nodes := []txnif.DeleteNode{node}
	return &deleteNode{
		DeleteNodes: nodes,
		idx:         []int{idx},
	}
}

type txnTable struct {
	store       *txnStore
	createEntry txnif.TxnEntry
	dropEntry   txnif.TxnEntry
	tableSpace  *tableSpace
	deleteNodes map[common.ID]*deleteNode
	entry       *catalog.TableEntry
	schema      *catalog.Schema
	logs        []wal.LogEntry

	dedupedObjectHint uint64
	dedupedBlockID    *types.Blockid

	txnEntries *txnEntries
	csnStart uint32

	idx int
}

func newTxnTable(store *txnStore, entry *catalog.TableEntry) (*txnTable, error) {
	schema := entry.GetVisibleSchema(store.txn)
	if schema == nil {
		return nil, moerr.NewInternalErrorNoCtx("No visible schema for ts %s", store.txn.GetStartTS().ToString())
	}
	tbl := &txnTable{
		store:       store,
		entry:       entry,
		schema:      schema,
		deleteNodes: make(map[common.ID]*deleteNode),
		logs:        make([]wal.LogEntry, 0),
		txnEntries:  newTxnEntries(),
	}
	return tbl, nil
}

func (tbl *txnTable) getNormalDeleteNode(id common.ID) *updates.DeleteNode {
	nodes, ok := tbl.deleteNodes[id]
	if !ok {
		return nil
	}
	for _, node := range nodes.DeleteNodes {
		if !node.IsPersistedDeletedNode() {
			return node.(*updates.DeleteNode)
		}
	}
	return nil
}

func (tbl *txnTable) PrePreareTransfer(phase string, ts types.TS) (err error) {
	return tbl.TransferDeletes(ts, phase)
}

func (tbl *txnTable) TransferDeleteIntent(
	id *common.ID,
	row uint32) (changed bool, nid *common.ID, nrow uint32, err error) {
	pinned, err := tbl.store.rt.TransferTable.Pin(*id)
	if err != nil {
		err = nil
		return
	}
	defer pinned.Close()
	entry, err := tbl.store.warChecker.CacheGet(
		tbl.entry.GetDB().ID,
		id.TableID,
		id.ObjectID())
	if err != nil {
		panic(err)
	}
	ts := types.BuildTS(time.Now().UTC().UnixNano(), 0)
	if err = readWriteConfilictCheck(entry.BaseEntryImpl, ts); err == nil {
		return
	}
	err = nil
	nid = &common.ID{
		TableID: id.TableID,
	}
	rowID, ok := pinned.Item().Transfer(row)
	if !ok {
		err = moerr.NewTxnWWConflictNoCtx(0, "")
		return
	}
	changed = true
	nid.BlockID, nrow = rowID.Decode()
	return
}

func (tbl *txnTable) TransferDeletes(ts types.TS, phase string) (err error) {
	if tbl.store.rt.TransferTable == nil {
		return
	}
	if len(tbl.deleteNodes) == 0 {
		return
	}
	for id, nodes := range tbl.deleteNodes {
		for offset, node := range nodes.DeleteNodes {
			// search the read set to check whether the block relevant to this
			// delete node was deleted.
			// if not deleted, go to the next node
			// if deleted, try to transfer the delete node
			if err = tbl.store.warChecker.checkOne(
				&id,
				ts,
			); err == nil {
				continue
			}

			// if the error is not a r-w conflict, something really went wrong
			if !moerr.IsMoErrCode(err, moerr.ErrTxnRWConflict) {
				return
			}

			// try to transfer the delete node
			// possible outcomes:
			// nil: transferred successfully
			// ErrTxnRWConflict: the target block was also compacted
			// ErrTxnWWConflict: w-w conflict
			if _, err = tbl.TransferDeleteNode(&id, node, offset, nodes.idx[offset], phase, ts); err != nil {
				return
			}
		}
	}
	return
}

// recurTransferDelete recursively transfers the deletes to the target block.
// memo stores the pinned transfer hash pages of deleted and committed blocks.
// id is the deleted and committed block to transfer.
func (tbl *txnTable) recurTransferDelete(
	memo map[types.Blockid]*common.PinnedItem[*model.TransferHashPage],
	page *model.TransferHashPage,
	id *common.ID, // the block had been deleted and committed.
	row uint32,
	pk containers.Vector,
	depth int,
	ts types.TS) error {

	var page2 *common.PinnedItem[*model.TransferHashPage]

	rowID, ok := page.Transfer(row)
	if !ok {
		err := moerr.NewTxnWWConflictNoCtx(0, "")
		msg := fmt.Sprintf("table-%d blk-%d delete row-%d depth-%d",
			id.TableID,
			id.BlockID,
			row,
			depth)
		logutil.Warnf("[ts=%s]TransferDeleteNode: %v",
			tbl.store.txn.GetStartTS().ToString(),
			msg)
		return err
	}
	blockID, offset := rowID.Decode()
	newID := &common.ID{
		DbID:    id.DbID,
		TableID: id.TableID,
		BlockID: blockID,
	}

	// check if the target block had been soft deleted and committed before ts;
	// if not, transfer the deletes to the target block,
	// otherwise recursively transfer the deletes to the next target block.
	err := tbl.store.warChecker.checkOne(newID, ts)
	if err == nil {
		// transfer the deletes to the target block.
		if err = tbl.RangeDelete(newID, offset, offset, pk, handle.DT_Normal); err != nil {
			return err
		}
		common.DoIfInfoEnabled(func() {
			logutil.Infof("depth-%d %s transfer delete from blk-%s row-%d to blk-%s row-%d",
				depth,
				tbl.schema.Name,
				id.BlockID.String(),
				row,
				blockID.String(),
				offset)
		})
		return nil
	}
	tbl.store.warChecker.conflictSet[*newID.ObjectID()] = true
	// prepare to recursively transfer the deletes to the next target block.
	if page2, ok = memo[blockID]; !ok {
		page2, err = tbl.store.rt.TransferTable.Pin(*newID)
		if err != nil {
			return err
		}
		memo[blockID] = page2
	}

	rowID, ok = page2.Item().Transfer(offset)
	if !ok {
		err := moerr.NewTxnWWConflictNoCtx(0, "")
		msg := fmt.Sprintf("table-%d blk-%d delete row-%d depth-%d",
			newID.TableID,
			newID.BlockID,
			offset,
			depth)
		logutil.Warnf("[ts=%s]TransferDeleteNode: %v",
			tbl.store.txn.GetStartTS().ToString(),
			msg)
		return err
	}
	blockID, offset = rowID.Decode()
	newID = &common.ID{
		DbID:    id.DbID,
		TableID: id.TableID,
		BlockID: blockID,
	}
	// tail recursion
	return tbl.recurTransferDelete(
		memo,
		page2.Item(),
		newID,
		offset,
		pk,
		depth+1,
		ts)
}

// TransferDeleteNode TODO: transfer persisted deletes to the target block.
func (tbl *txnTable) TransferDeleteNode(
	id *common.ID, node txnif.DeleteNode,
	offset, idx int, phase string, ts types.TS,
) (transferred bool, err error) {
	rows := node.DeletedRows()
	pk := node.DeletedPK()
	if transferred, err = tbl.TransferDeleteRows(id, rows, pk, phase, ts); err != nil {
		return
	}

	// rollback the transferred delete node. should not fail
	if err = node.PrepareRollback(); err != nil {
		panic(err)
	}
	if err = node.ApplyRollback(); err != nil {
		panic(err)
	}
	tbl.commitTransferDeleteNode(id, offset, idx)
	return
}

func (tbl *txnTable) TransferDeleteRows(
	id *common.ID,
	rows []uint32,
	pk map[uint32]containers.Vector,
	phase string,
	ts types.TS) (transferred bool, err error) {
	memo := make(map[types.Blockid]*common.PinnedItem[*model.TransferHashPage])
	common.DoIfInfoEnabled(func() {
		logutil.Info("[Start]",
			common.AnyField("txn-start-ts", tbl.store.txn.GetStartTS().ToString()),
			common.OperationField("transfer-deletes"),
			common.OperandField(id.BlockString()),
			common.AnyField("phase", phase))
	})
	defer func() {
		common.DoIfInfoEnabled(func() {
			logutil.Info("[End]",
				common.AnyField("txn-start-ts", tbl.store.txn.GetStartTS().ToString()),
				common.OperationField("transfer-deletes"),
				common.OperandField(id.BlockString()),
				common.AnyField("phase", phase),
				common.ErrorField(err))
		})
		for _, m := range memo {
			m.Close()
		}
	}()

	pinned, err := tbl.store.rt.TransferTable.Pin(*id)
	// cannot find a transferred record. maybe the transferred record was TTL'ed
	// here we can convert the error back to a r-w conflict
	if err != nil {
		err = moerr.NewTxnRWConflictNoCtx()
		return
	}
	memo[id.BlockID] = pinned

	// logutil.Infof("TransferDeleteNode deletenode %s", node.DeleteNode.(*updates.DeleteNode).GeneralVerboseString())
	page := pinned.Item()
	depth := 0
	for _, row := range rows {
		if err = tbl.recurTransferDelete(memo, page, id, row, pk[row], depth, ts); err != nil {
			return
		}
	}

	return
}

func (tbl *txnTable) commitTransferDeleteNode(id *common.ID, offset, idx int) {
	tbl.store.warChecker.Delete(id)
	tbl.txnEntries.Delete(idx)
	nodes := tbl.deleteNodes[*id]
	if offset == len(nodes.DeleteNodes)-1 {
		nodes.DeleteNodes = nodes.DeleteNodes[:offset]
	} else {
		nodes.DeleteNodes = append(nodes.DeleteNodes[:offset], nodes.DeleteNodes[offset+1:]...)
	}
	if len(nodes.DeleteNodes) == 0 {
		delete(tbl.deleteNodes, *id)
	}
}

func (tbl *txnTable) WaitSynced() {
	for _, e := range tbl.logs {
		if err := e.WaitDone(); err != nil {
			panic(err)
		}
		e.Free()
	}
}

func (tbl *txnTable) CollectCmd(cmdMgr *commandManager) (err error) {
	tbl.csnStart = uint32(cmdMgr.GetCSN())
	for idx, txnEntry := range tbl.txnEntries.entries {
		if tbl.txnEntries.IsDeleted(idx) {
			continue
		}
		csn := cmdMgr.GetCSN()
		cmd, err := txnEntry.MakeCommand(csn)
		// logutil.Infof("%d-%d", csn, cmd.GetType())
		if err != nil {
			return err
		}
		if cmd == nil {
			panic(txnEntry)
		}
		cmdMgr.AddCmd(cmd)
	}
	if tbl.tableSpace != nil {
		if err = tbl.tableSpace.CollectCmd(cmdMgr); err != nil {
			return
		}
	}
	return
}

func (tbl *txnTable) GetObject(id *types.Objectid) (obj handle.Object, err error) {
	meta, err := tbl.store.warChecker.CacheGet(
		tbl.entry.GetDB().ID,
		tbl.entry.ID,
		id)
	if err != nil {
		return
	}
	obj = buildObject(tbl, meta)
	return
}

func (tbl *txnTable) SoftDeleteObject(id *types.Objectid) (err error) {
	txnEntry, err := tbl.entry.DropObjectEntry(id, tbl.store.txn)
	if err != nil {
		return
	}
	tbl.store.IncreateWriteCnt()
	if txnEntry != nil {
		tbl.txnEntries.Append(txnEntry)
	}
	tbl.store.txn.GetMemo().AddObject(tbl.entry.GetDB().GetID(), tbl.entry.ID, id)
	return
}

func (tbl *txnTable) CreateObject(is1PC bool) (obj handle.Object, err error) {
	perfcounter.Update(tbl.store.ctx, func(counter *perfcounter.CounterSet) {
		counter.TAE.Object.Create.Add(1)
	})
	return tbl.createObject(catalog.ES_Appendable, is1PC, nil)
}

func (tbl *txnTable) CreateNonAppendableObject(is1PC bool, opts *objectio.CreateObjOpt) (obj handle.Object, err error) {
	perfcounter.Update(tbl.store.ctx, func(counter *perfcounter.CounterSet) {
		counter.TAE.Object.CreateNonAppendable.Add(1)
	})
	return tbl.createObject(catalog.ES_NotAppendable, is1PC, opts)
}

func (tbl *txnTable) createObject(state catalog.EntryState, is1PC bool, opts *objectio.CreateObjOpt) (obj handle.Object, err error) {
	var factory catalog.ObjectDataFactory
	if tbl.store.dataFactory != nil {
		factory = tbl.store.dataFactory.MakeObjectFactory()
	}
	var meta *catalog.ObjectEntry
	if meta, err = tbl.entry.CreateObject(tbl.store.txn, state, opts, factory); err != nil {
		return
	}
	obj = newObject(tbl, meta)
	tbl.store.IncreateWriteCnt()
	tbl.store.txn.GetMemo().AddObject(tbl.entry.GetDB().ID, tbl.entry.ID, &meta.ID)
	if is1PC {
		meta.Set1PC()
	}
	tbl.txnEntries.Append(meta)
	return
}

func (tbl *txnTable) LogTxnEntry(entry txnif.TxnEntry, readed []*common.ID) (err error) {
	tbl.store.IncreateWriteCnt()
	tbl.txnEntries.Append(entry)
	for _, id := range readed {
		// warChecker skips non-block reads
		if objectio.IsEmptyBlkid(&id.BlockID) {
			continue
		}

		// record the block into the read set
		tbl.store.warChecker.InsertByID(
			tbl.entry.GetDB().ID,
			id.TableID,
			id.ObjectID())
	}
	return
}

func (tbl *txnTable) SetCreateEntry(e txnif.TxnEntry) {
	if tbl.createEntry != nil {
		panic("logic error")
	}
	tbl.store.IncreateWriteCnt()
	tbl.store.txn.GetMemo().AddCatalogChange()
	tbl.createEntry = e
	tbl.txnEntries.Append(e)
}

func (tbl *txnTable) SetDropEntry(e txnif.TxnEntry) error {
	if tbl.dropEntry != nil {
		panic("logic error")
	}
	tbl.store.IncreateWriteCnt()
	tbl.store.txn.GetMemo().AddCatalogChange()
	tbl.dropEntry = e
	tbl.txnEntries.Append(e)
	return nil
}

func (tbl *txnTable) IsDeleted() bool {
	return tbl.dropEntry != nil
}

// GetLocalSchema returns the schema kept in the txn table, rather than the
// latest schema in the TableEntry.
func (tbl *txnTable) GetLocalSchema() *catalog.Schema {
	return tbl.schema
}

func (tbl *txnTable) GetMeta() *catalog.TableEntry {
	return tbl.entry
}

func (tbl *txnTable) GetID() uint64 {
	return tbl.entry.GetID()
}

func (tbl *txnTable) Close() error {
	var err error
	if tbl.tableSpace != nil {
		if err = tbl.tableSpace.Close(); err != nil {
			return err
		}
		tbl.tableSpace = nil
	}
	tbl.deleteNodes = nil
	tbl.logs = nil
	tbl.txnEntries = nil
	return nil
}

func (tbl *txnTable) AddDeleteNode(id *common.ID, node txnif.DeleteNode) error {
	nid := *id
	u := tbl.deleteNodes[nid]
	if u != nil {
		for _, n := range u.DeleteNodes {
			if n.IsPersistedDeletedNode() == node.IsPersistedDeletedNode() {
				return ErrDuplicateNode
			}
		}
		u.DeleteNodes = append(u.DeleteNodes, node)
		if !node.IsPersistedDeletedNode() {
			u.idx = append(u.idx, tbl.txnEntries.Len())
		}
	} else {
		tbl.store.IncreateWriteCnt()
		tbl.store.txn.GetMemo().AddObject(tbl.entry.GetDB().ID, id.TableID, id.ObjectID())
		tbl.deleteNodes[nid] = newDeleteNode(node, tbl.txnEntries.Len())
	}
	tbl.txnEntries.Append(node)
	return nil
}

func (tbl *txnTable) Append(ctx context.Context, data *containers.Batch) (err error) {
	if tbl.schema.HasPK() && !tbl.schema.IsSecondaryIndexTable() {
		dedupType := tbl.store.txn.GetDedupType()
		if dedupType == txnif.FullDedup {
			// do the PK deduplication check against the txn's workspace.
			if err = tbl.DedupWorkSpace(
				data.Vecs[tbl.schema.GetSingleSortKeyIdx()]); err != nil {
				return
			}
			// do the PK deduplication check against the txn's snapshot data.
			if err = tbl.DedupSnapByPK(
				ctx,
				data.Vecs[tbl.schema.GetSingleSortKeyIdx()], false); err != nil {
				return
			}
		} else if dedupType == txnif.FullSkipWorkSpaceDedup {
			if err = tbl.DedupSnapByPK(
				ctx,
				data.Vecs[tbl.schema.GetSingleSortKeyIdx()], false); err != nil {
				return
			}
		} else if dedupType == txnif.IncrementalDedup {
			if err = tbl.DedupSnapByPK(
				ctx,
				data.Vecs[tbl.schema.GetSingleSortKeyIdx()], true); err != nil {
				return
			}
		}
	}
	if tbl.tableSpace == nil {
		tbl.tableSpace = newTableSpace(tbl)
	}
	return tbl.tableSpace.Append(data)
}

func (tbl *txnTable) AddObjsWithMetaLoc(ctx context.Context, stats containers.Vector) (err error) {
	return stats.Foreach(func(v any, isNull bool, row int) error {
		s := objectio.ObjectStats(v.([]byte))
		return tbl.addObjsWithMetaLoc(ctx, s)
	}, nil)
}

func (tbl *txnTable) addObjsWithMetaLoc(ctx context.Context, stats objectio.ObjectStats) (err error) {
	var pkVecs []containers.Vector
	var closeFuncs []func()
	defer func() {
		for _, v := range pkVecs {
			v.Close()
		}
		for _, f := range closeFuncs {
			f()
		}
	}()
	if tbl.tableSpace != nil && tbl.tableSpace.isStatsExisted(stats) {
		return nil
	}
	metaLocs := make([]objectio.Location, 0)
	blkCount := stats.BlkCnt()
	totalRow := stats.Rows()
	blkMaxRows := tbl.schema.BlockMaxRows
	for i := uint16(0); i < uint16(blkCount); i++ {
		var blkRow uint32
		if totalRow > blkMaxRows {
			blkRow = blkMaxRows
		} else {
			blkRow = totalRow
		}
		totalRow -= blkRow
		metaloc := objectio.BuildLocation(stats.ObjectName(), stats.Extent(), blkRow, i)

		metaLocs = append(metaLocs, metaloc)
	}
	if tbl.schema.HasPK() && !tbl.schema.IsSecondaryIndexTable() {
		dedupType := tbl.store.txn.GetDedupType()
		if dedupType == txnif.FullDedup {
			// TODO: load the PK columns in parallel.
			for _, loc := range metaLocs {
				var vectors []containers.Vector
				var closeFunc func()
				// The lifetime of the vectors is limited to this function.
				// No NeedCopy. closeFunc is required after use.
				// VectorPool is nil.
				vectors, closeFunc, err = blockio.LoadColumns2(
					ctx,
					[]uint16{uint16(tbl.schema.GetSingleSortKeyIdx())},
					nil,
					tbl.store.rt.Fs.Service,
					loc,
					fileservice.Policy(0),
					false,
					nil,
				)
				if err != nil {
					return err
				}
				closeFuncs = append(closeFuncs, closeFunc)
				pkVecs = append(pkVecs, vectors[0])
			}
			for _, v := range pkVecs {
				// do the PK deduplication check against the txn's workspace.
				if err = tbl.DedupWorkSpace(v); err != nil {
					return
				}
				// do the PK deduplication check against the txn's snapshot data.
				if err = tbl.DedupSnapByPK(ctx, v, false); err != nil {
					return
				}
			}
		} else if dedupType == txnif.FullSkipWorkSpaceDedup {
			// do the PK deduplication check against the txn's snapshot data.
			if err = tbl.DedupSnapByMetaLocs(ctx, metaLocs, false); err != nil {
				return
			}
		} else if dedupType == txnif.IncrementalDedup {
			// do the PK deduplication check against the txn's snapshot data.
			if err = tbl.DedupSnapByMetaLocs(ctx, metaLocs, true); err != nil {
				return
			}
		}
	}
	if tbl.tableSpace == nil {
		tbl.tableSpace = newTableSpace(tbl)
	}
	return tbl.tableSpace.AddObjsWithMetaLoc(pkVecs, stats)
}

func (tbl *txnTable) RangeDeleteLocalRows(start, end uint32) (err error) {
	if tbl.tableSpace != nil {
		err = tbl.tableSpace.RangeDelete(start, end)
	}
	return
}

func (tbl *txnTable) LocalDeletesToString() string {
	s := fmt.Sprintf("<txnTable-%d>[LocalDeletes]:\n", tbl.GetID())
	if tbl.tableSpace != nil {
		s = fmt.Sprintf("%s%s", s, tbl.tableSpace.DeletesToString())
	}
	return s
}

func (tbl *txnTable) IsLocalDeleted(row uint32) bool {
	if tbl.tableSpace == nil {
		return false
	}
	return tbl.tableSpace.IsDeleted(row)
}

// RangeDelete deletes block rows in the range [start, end].
func (tbl *txnTable) RangeDelete(
	id *common.ID,
	start,
	end uint32,
	pk containers.Vector,
	dt handle.DeleteType) (err error) {
	defer func() {
		if err == nil {
			return
		}
		// if moerr.IsMoErrCode(err, moerr.ErrTxnWWConflict) {
		// 	moerr.NewTxnWriteConflictNoCtx("table-%d blk-%d delete rows from %d to %d",
		// 		id.TableID,
		// 		id.BlockID,
		// 		start,
		// 		end)
		// }
		// This err is also captured by the txn's write conflict check.
		if err != nil {
			if moerr.IsMoErrCode(err, moerr.ErrTxnWWConflict) {
				err = moerr.NewTxnWWConflictNoCtx(id.TableID, pk.PPString(int(end-start+1)))
			}

			logutil.Debugf("[ts=%s]: table-%d blk-%s delete rows from %d to %d %v",
				tbl.store.txn.GetStartTS().ToString(),
				id.TableID,
				id.BlockID.String(),
				start,
				end,
				err)
			if tbl.store.rt.Options.IncrementalDedup && moerr.IsMoErrCode(err, moerr.ErrTxnWWConflict) {
				logutil.Warnf("[txn%X,ts=%s]: table-%d blk-%s delete rows [%d,%d] pk %s",
					tbl.store.txn.GetID(),
					tbl.store.txn.GetStartTS().ToString(),
					id.TableID,
					id.BlockID.String(),
					start, end,
					pk.PPString(int(end-start+1)),
				)
			}
		}
	}()
	if tbl.tableSpace != nil && id.ObjectID().Eq(tbl.tableSpace.entry.ID) {
		err = tbl.RangeDeleteLocalRows(start, end)
		return
	}
	node := tbl.getNormalDeleteNode(*id)

	if node != nil {
		// TODO: refactor
		chain := node.GetChain().(*updates.DeleteChain)
		mvcc := chain.GetController()
		mvcc.Lock()
		if err = mvcc.CheckNotDeleted(start, end, tbl.store.txn.GetStartTS()); err == nil {
			node.RangeDeleteLocked(start, end, pk, common.WorkspaceAllocator)
		}
		if err != nil && moerr.IsMoErrCode(err, moerr.ErrTxnWWConflict) {
			logutil.Warn("w-w conflict", zap.String("chain", mvcc.StringLocked(common.PPL4, 0, "")))
		}
		mvcc.Unlock()
		if err != nil {
			tbl.store.warChecker.Insert(mvcc.GetEntry())
		}
		return
	}

	obj, err := tbl.store.warChecker.CacheGet(
		tbl.entry.GetDB().ID,
		id.TableID, id.ObjectID())
	if err != nil {
		return
	}
	objData := obj.GetObjectData()
	_, blkIdx := id.BlockID.Offsets()
	node2, err := objData.RangeDelete(tbl.store.txn, blkIdx, start, end, pk, dt)
	if err != nil && moerr.IsMoErrCode(err, moerr.ErrTxnWWConflict) {
		logutil.Warn("w-w conflict", zap.String("obj", objData.PPString(common.PPL4, 0, "", int(blkIdx))))
	}
	if err == nil {
		if err = tbl.AddDeleteNode(id, node2); err != nil {
			return
		}
		tbl.store.warChecker.Insert(obj)
	}
	return
}

func (tbl *txnTable) TryDeleteByDeltaloc(id *common.ID, deltaloc objectio.Location) (ok bool, err error) {
	node := tbl.deleteNodes[*id]
	if node != nil {
		return
	}

	obj, err := tbl.store.warChecker.CacheGet(
		tbl.entry.GetDB().ID,
		id.TableID, id.ObjectID())
	if err != nil {
		return
	}
	objData := obj.GetObjectData()
	_, blkIdx := id.BlockID.Offsets()
	node2, ok, err := objData.TryDeleteByDeltaloc(tbl.store.txn, blkIdx, deltaloc)
	if err == nil && ok {
		tbl.txnEntries.Append(node2)
		tbl.store.warChecker.Insert(obj)
		tbl.store.IncreateWriteCnt()
	}
	return
}

func (tbl *txnTable) GetByFilter(ctx context.Context, filter *handle.Filter) (id *common.ID, offset uint32, err error) {
	if tbl.tableSpace != nil {
		id, offset, err = tbl.tableSpace.GetByFilter(filter)
		if err == nil {
			return
		}
		err = nil
	}
	h := newRelation(tbl)
	blockIt := h.MakeObjectIt()
	for blockIt.Valid() {
		h := blockIt.GetObject()
		defer h.Close()
		if h.IsUncommitted() {
			blockIt.Next()
			continue
		}
		var blkID uint16
		blkID, offset, err = h.GetByFilter(ctx, filter, common.WorkspaceAllocator)
		if err == nil {
			id = h.Fingerprint()
			id.SetBlockOffset(blkID)
			break
		}
		blockIt.Next()
	}
	if err == nil && id == nil {
		err = moerr.NewNotFoundNoCtx()
	}
	return
}

func (tbl *txnTable) GetLocalValue(row uint32, col uint16) (v any, isNull bool, err error) {
	if tbl.tableSpace == nil {
		return
	}
	return tbl.tableSpace.GetValue(row, col)
}

func (tbl *txnTable) GetValue(ctx context.Context, id *common.ID, row uint32, col uint16) (v any, isNull bool, err error) {
	if tbl.tableSpace != nil && id.ObjectID().Eq(tbl.tableSpace.entry.ID) {
		return tbl.tableSpace.GetValue(row, col)
	}
	meta, err := tbl.store.warChecker.CacheGet(
		tbl.entry.GetDB().ID,
		id.TableID,
		id.ObjectID())
	if err != nil {
		panic(err)
	}
	block := meta.GetObjectData()
	_, blkIdx := id.BlockID.Offsets()
	return block.GetValue(ctx, tbl.store.txn, tbl.GetLocalSchema(), blkIdx, int(row), int(col), common.WorkspaceAllocator)
}

func (tbl *txnTable) UpdateObjectStats(id *common.ID, stats *objectio.ObjectStats) error {
	meta, err := tbl.entry.GetObjectByID(id.ObjectID())
	if err != nil {
		return err
	}
	isNewNode, err := meta.UpdateObjectInfo(tbl.store.txn, stats)
	if err != nil {
		return err
	}
	tbl.store.txn.GetMemo().AddObject(tbl.entry.GetDB().ID, tbl.entry.ID, &meta.ID)
	if isNewNode {
		tbl.txnEntries.Append(meta)
	}
	return nil
}

func (tbl *txnTable) UpdateDeltaLoc(id *common.ID, deltaloc objectio.Location) (err error) {
	meta, err := tbl.store.warChecker.CacheGet(
		tbl.entry.GetDB().ID,
		id.TableID,
		id.ObjectID())
	if err != nil {
		panic(err)
	}
	_, blkIdx := id.BlockID.Offsets()
	isNewNode, entry, err := meta.GetObjectData().UpdateDeltaLoc(tbl.store.txn, blkIdx, deltaloc)
	if err != nil {
		return
	}
	tbl.store.txn.GetMemo().AddObject(tbl.entry.GetDB().ID, id.TableID, id.ObjectID())
	if isNewNode {
		tbl.txnEntries.Append(entry)
	}
	meta.Is1PC()
	return
}

func (tbl *txnTable) AlterTable(ctx context.Context, req *apipb.AlterTableReq) error {
	switch req.Kind {
	case apipb.AlterKind_UpdateConstraint,
		apipb.AlterKind_UpdateComment,
		apipb.AlterKind_AddColumn,
		apipb.AlterKind_DropColumn,
		apipb.AlterKind_RenameTable,
		apipb.AlterKind_UpdatePolicy,
		apipb.AlterKind_AddPartition,
		apipb.AlterKind_RenameColumn:
	default:
		return moerr.NewNYI(ctx, "alter table %s", req.Kind.String())
	}
	tbl.store.IncreateWriteCnt()
	tbl.store.txn.GetMemo().AddCatalogChange()
	isNewNode, newSchema, err := tbl.entry.AlterTable(ctx, tbl.store.txn, req)
	if isNewNode {
		tbl.txnEntries.Append(tbl.entry)
	}
	if err != nil {
		return err
	}
	if req.Kind == apipb.AlterKind_RenameTable {
		rename := req.GetRenameTable()
		// update the name index in the db entry
		tenantID := newSchema.AcInfo.TenantID
		err = tbl.entry.GetDB().RenameTableInTxn(rename.OldName, rename.NewName, tbl.entry.ID, tenantID, tbl.store.txn, isNewNode)
		if err != nil {
			return err
		}
	}

	tbl.schema = newSchema // update the txn-local schema to the new schema
	// TODO(aptend): handle written data in localobj, keep the batch aligned with the new schema
	return err
}

func (tbl *txnTable) UncommittedRows() uint32 {
	if tbl.tableSpace == nil {
		return 0
	}
	return tbl.tableSpace.Rows()
}

func (tbl *txnTable) NeedRollback() bool {
	return tbl.createEntry != nil && tbl.dropEntry != nil
}

// PrePrepareDedup does the deduplication check for 1PC Commit or 2PC Prepare.
func (tbl *txnTable) PrePrepareDedup(ctx context.Context) (err error) {
	if tbl.tableSpace == nil || !tbl.schema.HasPK() || tbl.schema.IsSecondaryIndexTable() {
		return
	}
	var zm index.ZM
	pkColPos := tbl.schema.GetSingleSortKeyIdx()
	for _, node := range tbl.tableSpace.nodes {
		if node.IsPersisted() {
			err = tbl.DoPrecommitDedupByNode(ctx, node)
			if err != nil {
				return
			}
			continue
		}
		pkVec, err := node.WindowColumn(0, node.Rows(), pkColPos)
		if err != nil {
			return err
		}
		if zm.Valid() {
			zm.ResetMinMax()
		} else {
			pkType := pkVec.GetType()
			zm = index.NewZM(pkType.Oid, pkType.Scale)
		}
		if err = index.BatchUpdateZM(zm, pkVec.GetDownstreamVector()); err != nil {
			pkVec.Close()
			return err
		}
		if err = tbl.DoPrecommitDedupByPK(pkVec, zm); err != nil {
			pkVec.Close()
			return err
		}
		pkVec.Close()
	}
	return
}

func (tbl *txnTable) updateDedupedObjectHintAndBlockID(hint uint64, id *types.Blockid) {
	if tbl.dedupedObjectHint == 0 {
		tbl.dedupedObjectHint = hint
		tbl.dedupedBlockID = id
		return
	}
	if tbl.dedupedObjectHint > hint {
		tbl.dedupedObjectHint = hint
		tbl.dedupedBlockID = id
		return
	}
	if tbl.dedupedObjectHint == hint && tbl.dedupedBlockID.Compare(*id) > 0 {
		tbl.dedupedBlockID = id
	}
}

func (tbl *txnTable) quickSkipThisObject(
	ctx context.Context,
	keysZM index.ZM,
	meta *catalog.ObjectEntry,
) (ok bool, err error) {
	zm, err := meta.GetPKZoneMap(ctx, tbl.store.rt.Fs.Service)
	if err != nil {
		return
	}
	ok = !zm.FastIntersect(keysZM)
	return
}

func (tbl *txnTable) tryGetCurrentObjectBF(
	ctx context.Context,
	currLocation objectio.Location,
	prevBF objectio.BloomFilter,
	prevObjName *objectio.ObjectNameShort,
) (currBf objectio.BloomFilter, err error) {
	if len(currLocation) == 0 {
		return
	}
	if objectio.IsSameObjectLocVsShort(currLocation, prevObjName) {
		currBf = prevBF
		return
	}
	currBf, err = objectio.FastLoadBF(
		ctx,
		currLocation,
		false,
		tbl.store.rt.Fs.Service,
	)
	return
}

// DedupSnapByPK 1. checks whether these primary keys exist in the list of blocks
// which are visible and not dropped at the txn's snapshot timestamp.
// 2. It is called when appending data into this table.
func (tbl *txnTable) DedupSnapByPK(ctx context.Context, keys containers.Vector, dedupAfterSnapshotTS bool) (err error) {
	r := trace.StartRegion(ctx, "DedupSnapByPK")
	defer r.End()
	it := newObjectItOnSnap(tbl)
	maxObjectHint := uint64(0)
	pkType := keys.GetType()
	keysZM := index.NewZM(pkType.Oid, pkType.Scale)
	if err = index.BatchUpdateZM(keysZM, keys.GetDownstreamVector()); err != nil {
		return
	}
	var (
		name objectio.ObjectNameShort
		bf   objectio.BloomFilter
	)
	maxBlockID := &types.Blockid{}
	for it.Valid() {
		objH := it.GetObject()
		obj := objH.GetMeta().(*catalog.ObjectEntry)
		objH.Close()
		ObjectHint := obj.SortHint
		if ObjectHint > maxObjectHint {
			maxObjectHint = ObjectHint
		}
		objData := obj.GetObjectData()
		if objData == nil {
			it.Next()
			continue
		}
		if dedupAfterSnapshotTS && objData.CoarseCheckAllRowsCommittedBefore(tbl.store.txn.GetSnapshotTS()) {
			it.Next()
			continue
		}
		var rowmask *roaring.Bitmap
		if len(tbl.deleteNodes) > 0 {
			fp := obj.AsCommonID()
			deleteNode := tbl.getNormalDeleteNode(*fp)
			if deleteNode != nil {
				rowmask = deleteNode.GetRowMaskRefLocked()
			}
		}
		stats := obj.GetObjectStats()
		if !stats.ObjectLocation().IsEmpty() {
			var skip bool
			if skip, err = tbl.quickSkipThisObject(ctx, keysZM, obj); err != nil {
				return
			} else if skip {
				it.Next()
				continue
			}
		}
		if obj.HasCommittedPersistedData() {
			if bf, err = tbl.tryGetCurrentObjectBF(
				ctx,
				stats.ObjectLocation(),
				bf,
				&name,
			); err != nil {
				return
			}
		}
		name = *stats.ObjectShortName()

		if err = objData.BatchDedup(
			ctx,
			tbl.store.txn,
			keys,
			keysZM,
			rowmask,
			false,
			bf,
			common.WorkspaceAllocator,
		); err != nil {
			// logutil.Infof("%s, %s, %v", obj.String(), rowmask, err)
			return
		}
		it.Next()
	}
	tbl.updateDedupedObjectHintAndBlockID(maxObjectHint, maxBlockID)
	return
}

// DedupSnapByMetaLocs 1. checks whether the primary keys of all the input blocks exist in the list of blocks
// which are visible and not dropped at the txn's snapshot timestamp.
// 2. It is called when appending blocks into this table.
func (tbl *txnTable) DedupSnapByMetaLocs(ctx context.Context, metaLocs []objectio.Location, dedupAfterSnapshotTS bool) (err error) {
	loaded := make(map[int]containers.Vector)
	maxObjectHint := uint64(0)
	maxBlockID := &types.Blockid{}
	for i, loc := range metaLocs {
		it := newObjectItOnSnap(tbl)
		for it.Valid() {
			obj := it.GetObject().GetMeta().(*catalog.ObjectEntry)
			ObjectHint := obj.SortHint
			if ObjectHint > maxObjectHint {
				maxObjectHint = ObjectHint
			}
			objData := obj.GetObjectData()
			if objData == nil {
				it.Next()
				continue
			}

			// in the incremental deduplication scenario, coarsely check whether
			// all rows in this block were committed before the snapshot timestamp;
			// if so, skip deduplication for this block
			if dedupAfterSnapshotTS &&
				objData.CoarseCheckAllRowsCommittedBefore(tbl.store.txn.GetSnapshotTS()) {
				it.Next()
				continue
			}

			var rowmask *roaring.Bitmap
			if len(tbl.deleteNodes) > 0 {
				fp := obj.AsCommonID()
				deleteNode := tbl.getNormalDeleteNode(*fp)
				if deleteNode != nil {
					rowmask = deleteNode.GetRowMaskRefLocked()
				}
			}
			// TODO: load the ZM index first, then load the PK column only if necessary.
			_, ok := loaded[i]
			if !ok {
				// The lifetime of the vectors is limited to this function.
				// No NeedCopy. closeFunc is required after use.
				// VectorPool is nil.
				vectors, closeFunc, err := blockio.LoadColumns2(
					ctx,
					[]uint16{uint16(tbl.schema.GetSingleSortKeyIdx())},
					nil,
					tbl.store.rt.Fs.Service,
					loc,
					fileservice.Policy(0),
					false,
					nil,
				)
				if err != nil {
					return err
				}
				defer closeFunc()
				loaded[i] = vectors[0]
			}
			if err = objData.BatchDedup(
				ctx,
				tbl.store.txn,
				loaded[i],
				nil,
				rowmask,
				false,
				objectio.BloomFilter{},
				common.WorkspaceAllocator,
			); err != nil {
				// logutil.Infof("%s, %s, %v", obj.String(), rowmask, err)
				loaded[i].Close()
				return
			}
			it.Next()
		}
		if v, ok := loaded[i]; ok {
			v.Close()
		}
		tbl.updateDedupedObjectHintAndBlockID(maxObjectHint, maxBlockID)
	}
	return
}

// DoPrecommitDedupByPK 1. does deduplication by traversing all the Objects/blocks,
// skipping any block/Object that is still active, drop-committed, or aborted;
// 2. it is called when the txn is dequeued from the preparing queue.
// 3. this function should run as quickly as possible.
// TODO: it would be used to do deduplication with the logtail.
func (tbl *txnTable) DoPrecommitDedupByPK(pks containers.Vector, pksZM index.ZM) (err error) {
	moprobe.WithRegion(context.Background(), moprobe.TxnTableDoPrecommitDedupByPK, func() {
		objIt := tbl.entry.MakeObjectIt(false)
		for objIt.Valid() {
			obj := objIt.Get().GetPayload()
			if obj.SortHint < tbl.dedupedObjectHint {
				break
			}
			{
				obj.RLock()
				// FIXME: why wait for committing here? waiting already happened at Dedup.
				// needwait, txnToWait := obj.NeedWaitCommitting(tbl.store.txn.GetStartTS())
				// if needwait {
				// 	obj.RUnlock()
				// 	txnToWait.GetTxnState(true)
				// 	obj.RLock()
				// }
				shouldSkip := obj.HasDropCommittedLocked() || obj.IsCreatingOrAborted()
				obj.RUnlock()
				if shouldSkip {
					objIt.Next()
					continue
				}
			}
			objData := obj.GetObjectData()
			var rowmask *roaring.Bitmap
			if len(tbl.deleteNodes) > 0 {
				if tbl.store.warChecker.HasConflict(obj.ID) {
					continue
				}
				fp := obj.AsCommonID()
				deleteNode := tbl.getNormalDeleteNode(*fp)
				if deleteNode != nil {
					rowmask = deleteNode.GetRowMaskRefLocked()
				}
			}
			if err = objData.BatchDedup(
				context.Background(),
				tbl.store.txn,
				pks,
				pksZM,
				rowmask,
				true,
				objectio.BloomFilter{},
				common.WorkspaceAllocator,
			); err != nil {
				return
			}
			objIt.Next()
		}
	})
	return
}

func (tbl *txnTable) DoPrecommitDedupByNode(ctx context.Context, node InsertNode) (err error) {
	objIt := tbl.entry.MakeObjectIt(false)
	var pks containers.Vector
	// loaded := false
	for objIt.Valid() {
		obj := objIt.Get().GetPayload()
		if obj.SortHint < tbl.dedupedObjectHint {
			break
		}
		{
			obj.RLock()
			// FIXME: why wait for committing here? waiting already happened at Dedup.
			// needwait, txnToWait := obj.NeedWaitCommitting(tbl.store.txn.GetStartTS())
			// if needwait {
			// 	obj.RUnlock()
			// 	txnToWait.GetTxnState(true)
			// 	obj.RLock()
			// }
			shouldSkip := obj.HasDropCommittedLocked() || obj.IsCreatingOrAborted()
			obj.RUnlock()
			if shouldSkip {
				objIt.Next()
				continue
			}
		}

		// TODO: load the ZM/BF index first, then load the PK column only if necessary.
		if pks == nil {
			colV, err := node.GetColumnDataById(ctx, tbl.schema.GetSingleSortKeyIdx(), common.WorkspaceAllocator)
			if err != nil {
				return err
			}
			colV.ApplyDeletes()
			pks = colV.Orphan()
			defer pks.Close()
		}
		err = nil
		objData := obj.GetObjectData()
		var rowmask *roaring.Bitmap
		if len(tbl.deleteNodes) > 0 {
			if tbl.store.warChecker.HasConflict(obj.ID) {
				continue
			}
			fp := obj.AsCommonID()
			deleteNode := tbl.getNormalDeleteNode(*fp)
			if deleteNode != nil {
				rowmask = deleteNode.GetRowMaskRefLocked()
			}
		}
		if err = objData.BatchDedup(
			context.Background(),
			tbl.store.txn,
			pks,
			nil,
			rowmask,
			true,
			objectio.BloomFilter{},
			common.WorkspaceAllocator,
		); err != nil {
			return err
		}
		objIt.Next()
	}
	return
}

func (tbl *txnTable) DedupWorkSpace(key containers.Vector) (err error) {
	index := NewSimpleTableIndex()
	// Check whether the primary key is duplicated.
	if err = index.BatchInsert(
		tbl.schema.GetSingleSortKey().Name,
		key,
		0,
		key.Length(),
		0,
		true); err != nil {
		return
	}

	if tbl.tableSpace != nil {
		// Check whether the primary key is duplicated in the txn's workspace.
		if err = tbl.tableSpace.BatchDedup(key); err != nil {
			return
		}
	}
	return
}

func (tbl *txnTable) DoBatchDedup(key containers.Vector) (err error) {
	index := NewSimpleTableIndex()
	// Check whether the primary key is duplicated.
	if err = index.BatchInsert(
		tbl.schema.GetSingleSortKey().Name,
		key,
		0,
		key.Length(),
		0,
		true); err != nil {
		return
	}

	if tbl.tableSpace != nil {
		// Check whether the primary key is duplicated in the txn's workspace.
		if err = tbl.tableSpace.BatchDedup(key); err != nil {
			return
		}
	}
	// Check whether the primary key is duplicated in the txn's snapshot data.
	err = tbl.DedupSnapByPK(context.Background(), key, false)
	return
}

func (tbl *txnTable) BatchDedupLocal(bat *containers.Batch) (err error) {
	if tbl.tableSpace == nil || !tbl.schema.HasPK() {
		return
	}
	err = tbl.tableSpace.BatchDedup(bat.Vecs[tbl.schema.GetSingleSortKeyIdx()])
	return
}

func (tbl *txnTable) PrepareRollback() (err error) {
	for idx, txnEntry := range tbl.txnEntries.entries {
		if tbl.txnEntries.IsDeleted(idx) {
			continue
		}
		if err = txnEntry.PrepareRollback(); err != nil {
			break
		}
	}
	return
}

func (tbl *txnTable) ApplyAppend() (err error) {
	if tbl.tableSpace != nil {
		err = tbl.tableSpace.ApplyAppend()
	}
	return
}

func (tbl *txnTable) PrePrepare() (err error) {
	if tbl.tableSpace != nil {
		err = tbl.tableSpace.PrepareApply()
	}
	return
}

func (tbl *txnTable) dumpCore(errMsg string) {
	var errInfo bytes.Buffer
	errInfo.WriteString(fmt.Sprintf("Table: %s", tbl.entry.String()))
	errInfo.WriteString(fmt.Sprintf("\nTxn: %s", tbl.store.txn.String()))
	errInfo.WriteString(fmt.Sprintf("\nErr: %s", errMsg))
	logutil.Error(errInfo.String())
	util.EnableCoreDump()
	util.CoreDump()
}

func (tbl *txnTable) PrepareCommit() (err error) {
	for idx, node := range tbl.txnEntries.entries {
		if tbl.txnEntries.IsDeleted(idx) {
			continue
		}
		if err = node.PrepareCommit(); err != nil {
			if moerr.IsMoErrCode(err, moerr.ErrTxnNotFound) {
				var buf bytes.Buffer
				buf.WriteString(fmt.Sprintf("%d/%d No Txn", idx, len(tbl.txnEntries.entries)))
				tbl.dumpCore(buf.String())
			}
			break
		}
	}
	return
}

func (tbl *txnTable) PreApplyCommit() (err error) {
	return tbl.ApplyAppend()
}

func (tbl *txnTable) ApplyCommit() (err error) {
	csn := tbl.csnStart
	for idx, node := range tbl.txnEntries.entries {
		if tbl.txnEntries.IsDeleted(idx) {
			continue
		}
		if node.Is1PC() {
			continue
		}
		if err = node.ApplyCommit(); err != nil {
			break
		}
		csn++
	}
	return
}

func (tbl *txnTable) Apply1PCCommit() (err error) {
	for idx, node := range tbl.txnEntries.entries {
		if tbl.txnEntries.IsDeleted(idx) {
			continue
		}
		if !node.Is1PC() {
			continue
		}
		if err = node.ApplyCommit(); err != nil {
			break
		}
		tbl.csnStart++
	}
	return
}

func (tbl *txnTable) ApplyRollback() (err error) {
	csn := tbl.csnStart
	for idx, node := range tbl.txnEntries.entries {
		if tbl.txnEntries.IsDeleted(idx) {
			continue
		}
		if node.Is1PC() {
			continue
		}
		if err = node.ApplyRollback(); err != nil {
			break
		}
		csn++
	}
	return
}

func (tbl *txnTable) CleanUp() {
	if tbl.tableSpace != nil {
		tbl.tableSpace.CloseAppends()
	}
}