github.com/whtcorpsinc/MilevaDB-Prod@v0.0.0-20211104133533-f57f4be3b597/interlock/admin.go

// Copyright 2020 WHTCORPS INC, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package interlock

import (
	"context"
	"math"

	"github.com/whtcorpsinc/BerolinaSQL/ast"
	"github.com/whtcorpsinc/BerolinaSQL/perceptron"
	"github.com/whtcorpsinc/BerolinaSQL/terror"
	"github.com/whtcorpsinc/errors"
	"github.com/whtcorpsinc/fidelpb/go-fidelpb"
	"github.com/whtcorpsinc/milevadb/allegrosql"
	"github.com/whtcorpsinc/milevadb/blockcodec"
	"github.com/whtcorpsinc/milevadb/causet"
	"github.com/whtcorpsinc/milevadb/causet/blocks"
	causetembedded "github.com/whtcorpsinc/milevadb/causet/embedded"
	"github.com/whtcorpsinc/milevadb/ekv"
	"github.com/whtcorpsinc/milevadb/schemareplicant"
	"github.com/whtcorpsinc/milevadb/soliton"
	"github.com/whtcorpsinc/milevadb/soliton/chunk"
	"github.com/whtcorpsinc/milevadb/soliton/codec"
	"github.com/whtcorpsinc/milevadb/soliton/logutil"
	"github.com/whtcorpsinc/milevadb/soliton/ranger"
	"github.com/whtcorpsinc/milevadb/soliton/timeutil"
	"github.com/whtcorpsinc/milevadb/statistics"
	"github.com/whtcorpsinc/milevadb/stochastikctx/stmtctx"
	"github.com/whtcorpsinc/milevadb/types"
	"go.uber.org/zap"
)

var (
	_ InterlockingDirectorate = &ChecHoTTexRangeInterDirc{}
	_ InterlockingDirectorate = &RecoverIndexInterDirc{}
	_ InterlockingDirectorate = &CleanupIndexInterDirc{}
)

// ChecHoTTexRangeInterDirc outputs the index entries whose handle falls within the
// requested handle ranges.
type ChecHoTTexRangeInterDirc struct {
	baseInterlockingDirectorate

	causet   *perceptron.BlockInfo
	index    *perceptron.IndexInfo
	is       schemareplicant.SchemaReplicant
	startKey []types.Causet

	handleRanges []ast.HandleRange
	srcChunk     *chunk.Chunk

	result  allegrosql.SelectResult
	defcaus []*perceptron.DeferredCausetInfo
}

// Next implements the InterlockingDirectorate Next interface.
func (e *ChecHoTTexRangeInterDirc) Next(ctx context.Context, req *chunk.Chunk) error {
	req.Reset()
	handleIdx := e.schemaReplicant.Len() - 1
	for {
		err := e.result.Next(ctx, e.srcChunk)
		if err != nil {
			return err
		}
		if e.srcChunk.NumEvents() == 0 {
			return nil
		}
		iter := chunk.NewIterator4Chunk(e.srcChunk)
		for event := iter.Begin(); event != iter.End(); event = iter.Next() {
			handle := event.GetInt64(handleIdx)
			for _, hr := range e.handleRanges {
				if handle >= hr.Begin && handle < hr.End {
					req.AppendEvent(event)
					break
				}
			}
		}
		if req.NumEvents() > 0 {
			return nil
		}
	}
}
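// Usage sketch (an assumption based on upstream TiDB's ADMIN syntax; this fork's
// BerolinaSQL grammar may differ): a memex such as
//
//	ADMIN CHECK INDEX t idx (1, 10), (20, 40);
//
// would build a ChecHoTTexRangeInterDirc whose handleRanges hold the listed
// (begin, end) pairs, and Next returns only the scanned index events whose handle
// lies in one of those half-open ranges.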
// Open implements the InterlockingDirectorate Open interface.
func (e *ChecHoTTexRangeInterDirc) Open(ctx context.Context) error {
	tDefCauss := e.causet.DefCauss()
	for _, ic := range e.index.DeferredCausets {
		defCaus := tDefCauss[ic.Offset]
		e.defcaus = append(e.defcaus, defCaus)
	}

	defCausTypeForHandle := e.schemaReplicant.DeferredCausets[len(e.defcaus)].RetType
	e.defcaus = append(e.defcaus, &perceptron.DeferredCausetInfo{
		ID:        perceptron.ExtraHandleID,
		Name:      perceptron.ExtraHandleName,
		FieldType: *defCausTypeForHandle,
	})

	e.srcChunk = newFirstChunk(e)
	posetPosetDagPB, err := e.buildPosetDagPB()
	if err != nil {
		return err
	}
	sc := e.ctx.GetStochastikVars().StmtCtx
	txn, err := e.ctx.Txn(true)
	if err != nil {
		return err
	}
	var builder allegrosql.RequestBuilder
	ekvReq, err := builder.SetIndexRanges(sc, e.causet.ID, e.index.ID, ranger.FullRange()).
		SetPosetDagRequest(posetPosetDagPB).
		SetStartTS(txn.StartTS()).
		SetKeepOrder(true).
		SetFromStochastikVars(e.ctx.GetStochastikVars()).
		Build()
	if err != nil {
		return err
	}

	e.result, err = allegrosql.Select(ctx, e.ctx, ekvReq, e.retFieldTypes, statistics.NewQueryFeedback(0, nil, 0, false))
	if err != nil {
		return err
	}
	e.result.Fetch(ctx)
	return nil
}

// buildPosetDagPB constructs a PosetDag request containing a single index scan
// interlocking directorate over this executor's index.
func (e *ChecHoTTexRangeInterDirc) buildPosetDagPB() (*fidelpb.PosetDagRequest, error) {
	posetPosetDagReq := &fidelpb.PosetDagRequest{}
	posetPosetDagReq.TimeZoneName, posetPosetDagReq.TimeZoneOffset = timeutil.Zone(e.ctx.GetStochastikVars().Location())
	sc := e.ctx.GetStochastikVars().StmtCtx
	posetPosetDagReq.Flags = sc.PushDownFlags()
	for i := range e.schemaReplicant.DeferredCausets {
		posetPosetDagReq.OutputOffsets = append(posetPosetDagReq.OutputOffsets, uint32(i))
	}
	execPB := e.constructIndexScanPB()
	posetPosetDagReq.InterlockingDirectorates = append(posetPosetDagReq.InterlockingDirectorates, execPB)

	err := causetembedded.SetPBDeferredCausetsDefaultValue(e.ctx, posetPosetDagReq.InterlockingDirectorates[0].IdxScan.DeferredCausets, e.defcaus)
	if err != nil {
		return nil, err
	}
	allegrosql.SetEncodeType(e.ctx, posetPosetDagReq)
	return posetPosetDagReq, nil
}

func (e *ChecHoTTexRangeInterDirc) constructIndexScanPB() *fidelpb.InterlockingDirectorate {
	idxInterDirc := &fidelpb.IndexScan{
		BlockId:         e.causet.ID,
		IndexId:         e.index.ID,
		DeferredCausets: soliton.DeferredCausetsToProto(e.defcaus, e.causet.PKIsHandle),
	}
	return &fidelpb.InterlockingDirectorate{Tp: fidelpb.InterDircType_TypeIndexScan, IdxScan: idxInterDirc}
}

// Close implements the InterlockingDirectorate Close interface.
func (e *ChecHoTTexRangeInterDirc) Close() error {
	return nil
}

// RecoverIndexInterDirc represents a recover index interlock.
// It is built from an "admin recover index" memex and is used to backfill
// corrupted index entries.
type RecoverIndexInterDirc struct {
	baseInterlockingDirectorate

	done bool

	index      causet.Index
	causet     causet.Block
	physicalID int64
	batchSize  int

	defCausumns       []*perceptron.DeferredCausetInfo
	defCausFieldTypes []*types.FieldType
	srcChunk          *chunk.Chunk
	handleDefCauss    causetembedded.HandleDefCauss

	// The buffers below are reused across batches to reduce allocations.
	recoverEvents []recoverEvents
	idxValsBufs   [][]types.Causet
	idxKeyBufs    [][]byte
	batchKeys     []ekv.Key
}

func (e *RecoverIndexInterDirc) defCausumnsTypes() []*types.FieldType {
	if e.defCausFieldTypes != nil {
		return e.defCausFieldTypes
	}

	e.defCausFieldTypes = make([]*types.FieldType, 0, len(e.defCausumns))
	for _, defCaus := range e.defCausumns {
		e.defCausFieldTypes = append(e.defCausFieldTypes, &defCaus.FieldType)
	}
	return e.defCausFieldTypes
}

// Open implements the InterlockingDirectorate Open interface.
func (e *RecoverIndexInterDirc) Open(ctx context.Context) error {
	if err := e.baseInterlockingDirectorate.Open(ctx); err != nil {
		return err
	}

	e.srcChunk = chunk.New(e.defCausumnsTypes(), e.initCap, e.maxChunkSize)
	e.batchSize = 2048
	e.recoverEvents = make([]recoverEvents, 0, e.batchSize)
	e.idxValsBufs = make([][]types.Causet, e.batchSize)
	e.idxKeyBufs = make([][]byte, e.batchSize)
	return nil
}

func (e *RecoverIndexInterDirc) constructBlockScanPB(tblInfo *perceptron.BlockInfo, defCausInfos []*perceptron.DeferredCausetInfo) (*fidelpb.InterlockingDirectorate, error) {
	tblScan := blocks.BuildBlockScanFromInfos(tblInfo, defCausInfos)
	tblScan.BlockId = e.physicalID
	err := causetembedded.SetPBDeferredCausetsDefaultValue(e.ctx, tblScan.DeferredCausets, defCausInfos)
	return &fidelpb.InterlockingDirectorate{Tp: fidelpb.InterDircType_TypeBlockScan, TblScan: tblScan}, err
}

func (e *RecoverIndexInterDirc) constructLimitPB(count uint64) *fidelpb.InterlockingDirectorate {
	limitInterDirc := &fidelpb.Limit{
		Limit: count,
	}
	return &fidelpb.InterlockingDirectorate{Tp: fidelpb.InterDircType_TypeLimit, Limit: limitInterDirc}
}

func (e *RecoverIndexInterDirc) buildPosetDagPB(txn ekv.Transaction, limitCnt uint64) (*fidelpb.PosetDagRequest, error) {
	posetPosetDagReq := &fidelpb.PosetDagRequest{}
	posetPosetDagReq.TimeZoneName, posetPosetDagReq.TimeZoneOffset = timeutil.Zone(e.ctx.GetStochastikVars().Location())
	sc := e.ctx.GetStochastikVars().StmtCtx
	posetPosetDagReq.Flags = sc.PushDownFlags()
	for i := range e.defCausumns {
		posetPosetDagReq.OutputOffsets = append(posetPosetDagReq.OutputOffsets, uint32(i))
	}

	tblScanInterDirc, err := e.constructBlockScanPB(e.causet.Meta(), e.defCausumns)
	if err != nil {
		return nil, err
	}
	posetPosetDagReq.InterlockingDirectorates = append(posetPosetDagReq.InterlockingDirectorates, tblScanInterDirc)

	limitInterDirc := e.constructLimitPB(limitCnt)
	posetPosetDagReq.InterlockingDirectorates = append(posetPosetDagReq.InterlockingDirectorates, limitInterDirc)
	allegrosql.SetEncodeType(e.ctx, posetPosetDagReq)
	return posetPosetDagReq, nil
}

func (e *RecoverIndexInterDirc) buildBlockScan(ctx context.Context, txn ekv.Transaction, startHandle ekv.Handle, limitCnt uint64) (allegrosql.SelectResult, error) {
	posetPosetDagPB, err := e.buildPosetDagPB(txn, limitCnt)
	if err != nil {
		return nil, err
	}
	var builder allegrosql.RequestBuilder
	builder.KeyRanges, err = buildRecoverIndexKeyRanges(e.ctx.GetStochastikVars().StmtCtx, e.physicalID, startHandle)
	if err != nil {
		return nil, err
	}
	ekvReq, err := builder.
		SetPosetDagRequest(posetPosetDagPB).
		SetStartTS(txn.StartTS()).
		SetKeepOrder(true).
		SetFromStochastikVars(e.ctx.GetStochastikVars()).
		Build()
	if err != nil {
		return nil, err
	}

	// With limitCnt set, the matching rows may all fall in a single region, so use a
	// concurrency of 1 to avoid unnecessary region scans.
	ekvReq.Concurrency = 1
	result, err := allegrosql.Select(ctx, e.ctx, ekvReq, e.defCausumnsTypes(), statistics.NewQueryFeedback(0, nil, 0, false))
	if err != nil {
		return nil, err
	}
	result.Fetch(ctx)
	return result, nil
}

// buildRecoverIndexKeyRanges builds a KeyRange covering (startHandle, unlimited).
func buildRecoverIndexKeyRanges(sctx *stmtctx.StatementContext, tid int64, startHandle ekv.Handle) ([]ekv.KeyRange, error) {
	var startKey []byte
	if startHandle == nil {
		startKey = blockcodec.EncodeEventKey(tid, []byte{codec.NilFlag})
	} else {
		startKey = blockcodec.EncodeEventKey(tid, startHandle.Next().Encoded())
	}
	maxVal, err := codec.EncodeKey(sctx, nil, types.MaxValueCauset())
	if err != nil {
		return nil, errors.Trace(err)
	}
	endKey := blockcodec.EncodeEventKey(tid, maxVal)
	return []ekv.KeyRange{{StartKey: startKey, EndKey: endKey}}, nil
}

type backfillResult struct {
	currentHandle  ekv.Handle
	addedCount     int64
	scanEventCount int64
}

// backfillIndex scans the causet in batches and recreates missing index entries
// until a scan returns no more rows.
func (e *RecoverIndexInterDirc) backfillIndex(ctx context.Context) (int64, int64, error) {
	var (
		currentHandle ekv.Handle = nil
		totalAddedCnt            = int64(0)
		totalScanCnt             = int64(0)
		lastLogCnt               = int64(0)
		result        backfillResult
	)
	for {
		errInTxn := ekv.RunInNewTxn(e.ctx.GetStore(), true, func(txn ekv.Transaction) error {
			var err error
			result, err = e.backfillIndexInTxn(ctx, txn, currentHandle)
			return err
		})
		if errInTxn != nil {
			return totalAddedCnt, totalScanCnt, errInTxn
		}
		totalAddedCnt += result.addedCount
		totalScanCnt += result.scanEventCount
		if totalScanCnt-lastLogCnt >= 50000 {
			lastLogCnt = totalScanCnt
			logutil.Logger(ctx).Info("recover index", zap.String("causet", e.causet.Meta().Name.O),
				zap.String("index", e.index.Meta().Name.O), zap.Int64("totalAddedCnt", totalAddedCnt),
				zap.Int64("totalScanCnt", totalScanCnt), zap.Stringer("currentHandle", result.currentHandle))
		}

		// No more rows to scan.
		if result.scanEventCount == 0 {
			break
		}
		currentHandle = result.currentHandle
	}
	return totalAddedCnt, totalScanCnt, nil
}

// recoverEvents is one candidate event to backfill: its handle, its index values,
// and whether it should be skipped because the index entry already exists.
type recoverEvents struct {
	handle  ekv.Handle
	idxVals []types.Causet
	skip    bool
}

// fetchRecoverEvents reads up to batchSize events from srcResult, recording each
// event's handle and index values.
func (e *RecoverIndexInterDirc) fetchRecoverEvents(ctx context.Context, srcResult allegrosql.SelectResult, result *backfillResult) ([]recoverEvents, error) {
	e.recoverEvents = e.recoverEvents[:0]
	idxValLen := len(e.index.Meta().DeferredCausets)
	result.scanEventCount = 0

	for {
		err := srcResult.Next(ctx, e.srcChunk)
		if err != nil {
			return nil, err
		}

		if e.srcChunk.NumEvents() == 0 {
			break
		}
		iter := chunk.NewIterator4Chunk(e.srcChunk)
		for event := iter.Begin(); event != iter.End(); event = iter.Next() {
			if result.scanEventCount >= int64(e.batchSize) {
				return e.recoverEvents, nil
			}
			handle, err := e.handleDefCauss.BuildHandle(event)
			if err != nil {
				return nil, err
			}
			idxVals := extractIdxVals(event, e.idxValsBufs[result.scanEventCount], e.defCausFieldTypes, idxValLen)
			e.idxValsBufs[result.scanEventCount] = idxVals
			e.recoverEvents = append(e.recoverEvents, recoverEvents{handle: handle, idxVals: idxVals, skip: false})
			result.scanEventCount++
			result.currentHandle = handle
		}
	}

	return e.recoverEvents, nil
}

// batchMarkDup marks the events whose index entry already exists so that they are
// skipped during backfill.
func (e *RecoverIndexInterDirc) batchMarkDup(txn ekv.Transaction, rows []recoverEvents) error {
	if len(rows) == 0 {
		return nil
	}
	e.batchKeys = e.batchKeys[:0]
	sc := e.ctx.GetStochastikVars().StmtCtx
	distinctFlags := make([]bool, len(rows))
	for i, event := range rows {
		idxKey, distinct, err := e.index.GenIndexKey(sc, event.idxVals, event.handle, e.idxKeyBufs[i])
		if err != nil {
			return err
		}
		e.idxKeyBufs[i] = idxKey

		e.batchKeys = append(e.batchKeys, idxKey)
		distinctFlags[i] = distinct
	}

	values, err := txn.BatchGet(context.Background(), e.batchKeys)
	if err != nil {
		return err
	}

	// 1. If the unique key already exists and the handles are equal, skip the event.
	// 2. If the unique key already exists but the handles differ, the data is inconsistent: log it and skip the event.
	// 3. If a non-unique key already exists, skip the event.
	isCommonHandle := e.causet.Meta().IsCommonHandle
	for i, key := range e.batchKeys {
		if val, found := values[string(key)]; found {
			if distinctFlags[i] {
				handle, err1 := blockcodec.DecodeHandleInUniqueIndexValue(val, isCommonHandle)
				if err1 != nil {
					return err1
				}

				if handle.Compare(rows[i].handle) != 0 {
					logutil.BgLogger().Warn("recover index: the constraint of unique index is broken, handle in index is not equal to handle in causet",
						zap.String("index", e.index.Meta().Name.O), zap.ByteString("indexKey", key),
						zap.Stringer("handleInBlock", rows[i].handle), zap.Stringer("handleInIndex", handle))
				}
			}
			rows[i].skip = true
		}
	}
	return nil
}

// backfillIndexInTxn recovers one batch of index entries inside a single transaction.
func (e *RecoverIndexInterDirc) backfillIndexInTxn(ctx context.Context, txn ekv.Transaction, currentHandle ekv.Handle) (result backfillResult, err error) {
	srcResult, err := e.buildBlockScan(ctx, txn, currentHandle, uint64(e.batchSize))
	if err != nil {
		return result, err
	}
	defer terror.Call(srcResult.Close)

	rows, err := e.fetchRecoverEvents(ctx, srcResult, &result)
	if err != nil {
		return result, err
	}

	err = e.batchMarkDup(txn, rows)
	if err != nil {
		return result, err
	}

	// Constraints have already been checked.
	e.ctx.GetStochastikVars().StmtCtx.BatchCheck = true
	for _, event := range rows {
		if event.skip {
			continue
		}

		recordKey := e.causet.RecordKey(event.handle)
		err := txn.LockKeys(ctx, new(ekv.LockCtx), recordKey)
		if err != nil {
			return result, err
		}

		_, err = e.index.Create(e.ctx, txn.GetUnionStore(), event.idxVals, event.handle)
		if err != nil {
			return result, err
		}
		result.addedCount++
	}
	return result, nil
}
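// Usage sketch (an assumption based on upstream TiDB's ADMIN syntax; this fork's
// grammar may differ): a memex such as
//
//	ADMIN RECOVER INDEX t idx;
//
// would build a RecoverIndexInterDirc, whose Next drives backfillIndex: the causet
// is scanned in batches of batchSize, events whose index entry already exists are
// skipped via batchMarkDup, and the missing entries are recreated with index.Create
// after the record keys are locked.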
// Next implements the InterlockingDirectorate Next interface.
func (e *RecoverIndexInterDirc) Next(ctx context.Context, req *chunk.Chunk) error {
	req.Reset()
	if e.done {
		return nil
	}

	recoveringClusteredIndex := e.index.Meta().Primary && e.causet.Meta().IsCommonHandle
	if recoveringClusteredIndex {
		req.AppendInt64(0, 0)
		req.AppendInt64(1, 0)
		e.done = true
		return nil
	}
	var totalAddedCnt, totalScanCnt int64
	var err error
	if tbl, ok := e.causet.(causet.PartitionedBlock); ok {
		pi := e.causet.Meta().GetPartitionInfo()
		for _, p := range pi.Definitions {
			e.causet = tbl.GetPartition(p.ID)
			e.index = blocks.GetWriblockIndexByName(e.index.Meta().Name.L, e.causet)
			e.physicalID = p.ID
			addedCnt, scanCnt, err := e.backfillIndex(ctx)
			totalAddedCnt += addedCnt
			totalScanCnt += scanCnt
			if err != nil {
				return err
			}
		}
	} else {
		totalAddedCnt, totalScanCnt, err = e.backfillIndex(ctx)
		if err != nil {
			return err
		}
	}

	req.AppendInt64(0, totalAddedCnt)
	req.AppendInt64(1, totalScanCnt)
	e.done = true
	return nil
}

// CleanupIndexInterDirc represents a cleanup index interlock.
// It is built from an "admin cleanup index" memex and is used to delete
// dangling index entries.
type CleanupIndexInterDirc struct {
	baseInterlockingDirectorate

	done      bool
	removeCnt uint64

	index      causet.Index
	causet     causet.Block
	physicalID int64

	defCausumns          []*perceptron.DeferredCausetInfo
	idxDefCausFieldTypes []*types.FieldType
	idxChunk             *chunk.Chunk
	handleDefCauss       causetembedded.HandleDefCauss

	idxValues    *ekv.HandleMap // ekv.Handle -> [][]types.Causet
	batchSize    uint64
	batchKeys    []ekv.Key
	idxValsBufs  [][]types.Causet
	lastIdxKey   []byte
	scanEventCnt uint64
}

func (e *CleanupIndexInterDirc) getIdxDefCausTypes() []*types.FieldType {
	if e.idxDefCausFieldTypes != nil {
		return e.idxDefCausFieldTypes
	}
	e.idxDefCausFieldTypes = make([]*types.FieldType, 0, len(e.defCausumns))
	for _, defCaus := range e.defCausumns {
		e.idxDefCausFieldTypes = append(e.idxDefCausFieldTypes, &defCaus.FieldType)
	}
	return e.idxDefCausFieldTypes
}

// batchGetRecord fetches the record values for every handle collected in idxValues.
func (e *CleanupIndexInterDirc) batchGetRecord(txn ekv.Transaction) (map[string][]byte, error) {
	e.idxValues.Range(func(h ekv.Handle, _ interface{}) bool {
		e.batchKeys = append(e.batchKeys, e.causet.RecordKey(h))
		return true
	})
	values, err := txn.BatchGet(context.Background(), e.batchKeys)
	if err != nil {
		return nil, err
	}
	return values, nil
}

// deleteDanglingIdx deletes the index entries whose corresponding record no longer exists.
func (e *CleanupIndexInterDirc) deleteDanglingIdx(txn ekv.Transaction, values map[string][]byte) error {
	for _, k := range e.batchKeys {
		if _, found := values[string(k)]; !found {
			_, handle, err := blockcodec.DecodeRecordKey(k)
			if err != nil {
				return err
			}
			handleIdxValsGroup, ok := e.idxValues.Get(handle)
			if !ok {
				return errors.Trace(errors.Errorf("batch keys are inconsistent with handles"))
			}
			for _, handleIdxVals := range handleIdxValsGroup.([][]types.Causet) {
				if err := e.index.Delete(e.ctx.GetStochastikVars().StmtCtx, txn, handleIdxVals, handle); err != nil {
					return err
				}
				e.removeCnt++
				if e.removeCnt%e.batchSize == 0 {
					logutil.BgLogger().Info("clean up dangling index", zap.String("causet", e.causet.Meta().Name.String()),
						zap.String("index", e.index.Meta().Name.String()), zap.Uint64("count", e.removeCnt))
				}
			}
		}
	}
	return nil
}

// extractIdxVals copies the first idxValLen defCausumn values of the event into
// idxVals, reusing the provided buffer when it is large enough.
func extractIdxVals(event chunk.Event, idxVals []types.Causet,
	fieldTypes []*types.FieldType, idxValLen int) []types.Causet {
	if cap(idxVals) < idxValLen {
		idxVals = make([]types.Causet, idxValLen)
	} else {
		idxVals = idxVals[:idxValLen]
	}

	for i := 0; i < idxValLen; i++ {
		defCausVal := event.GetCauset(i, fieldTypes[i])
		defCausVal.Copy(&idxVals[i])
	}
	return idxVals
}

// fetchIndex scans up to batchSize index entries starting after lastIdxKey and
// groups their index values by handle in idxValues.
func (e *CleanupIndexInterDirc) fetchIndex(ctx context.Context, txn ekv.Transaction) error {
	result, err := e.buildIndexScan(ctx, txn)
	if err != nil {
		return err
	}
	defer terror.Call(result.Close)

	sc := e.ctx.GetStochastikVars().StmtCtx
	idxDefCausLen := len(e.index.Meta().DeferredCausets)
	for {
		err := result.Next(ctx, e.idxChunk)
		if err != nil {
			return err
		}
		if e.idxChunk.NumEvents() == 0 {
			return nil
		}
		iter := chunk.NewIterator4Chunk(e.idxChunk)
		for event := iter.Begin(); event != iter.End(); event = iter.Next() {
			handle, err := e.handleDefCauss.BuildHandle(event)
			if err != nil {
				return err
			}
			idxVals := extractIdxVals(event, e.idxValsBufs[e.scanEventCnt], e.idxDefCausFieldTypes, idxDefCausLen)
			e.idxValsBufs[e.scanEventCnt] = idxVals
			existingIdxVals, ok := e.idxValues.Get(handle)
			if ok {
				uFIDelatedIdxVals := append(existingIdxVals.([][]types.Causet), idxVals)
				e.idxValues.Set(handle, uFIDelatedIdxVals)
			} else {
				e.idxValues.Set(handle, [][]types.Causet{idxVals})
			}
			idxKey, _, err := e.index.GenIndexKey(sc, idxVals, handle, nil)
			if err != nil {
				return err
			}
			e.scanEventCnt++
			e.lastIdxKey = idxKey
			if e.scanEventCnt >= e.batchSize {
				return nil
			}
		}
	}
}
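// Usage sketch (an assumption based on upstream TiDB's ADMIN syntax; this fork's
// grammar may differ): a memex such as
//
//	ADMIN CLEANUP INDEX t idx;
//
// would build a CleanupIndexInterDirc, whose Next drives cleanBlockIndex: each
// round fetches up to batchSize index entries, batch-gets the records they point
// at, and deletes the entries whose record is missing, until a round scans nothing.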
// Next implements the InterlockingDirectorate Next interface.
func (e *CleanupIndexInterDirc) Next(ctx context.Context, req *chunk.Chunk) error {
	req.Reset()
	if e.done {
		return nil
	}
	cleaningClusteredPrimaryKey := e.causet.Meta().IsCommonHandle && e.index.Meta().Primary
	if cleaningClusteredPrimaryKey {
		e.done = true
		req.AppendUint64(0, 0)
		return nil
	}

	var err error
	if tbl, ok := e.causet.(causet.PartitionedBlock); ok {
		pi := e.causet.Meta().GetPartitionInfo()
		for _, p := range pi.Definitions {
			e.causet = tbl.GetPartition(p.ID)
			e.index = blocks.GetWriblockIndexByName(e.index.Meta().Name.L, e.causet)
			e.physicalID = p.ID
			err = e.init()
			if err != nil {
				return err
			}
			err = e.cleanBlockIndex(ctx)
			if err != nil {
				return err
			}
		}
	} else {
		err = e.cleanBlockIndex(ctx)
		if err != nil {
			return err
		}
	}
	e.done = true
	req.AppendUint64(0, e.removeCnt)
	return nil
}

// cleanBlockIndex repeatedly cleans one batch of index entries per transaction
// until a scan returns no more entries.
func (e *CleanupIndexInterDirc) cleanBlockIndex(ctx context.Context) error {
	for {
		errInTxn := ekv.RunInNewTxn(e.ctx.GetStore(), true, func(txn ekv.Transaction) error {
			err := e.fetchIndex(ctx, txn)
			if err != nil {
				return err
			}
			values, err := e.batchGetRecord(txn)
			if err != nil {
				return err
			}
			err = e.deleteDanglingIdx(txn, values)
			if err != nil {
				return err
			}
			return nil
		})
		if errInTxn != nil {
			return errInTxn
		}
		if e.scanEventCnt == 0 {
			break
		}
		e.scanEventCnt = 0
		e.batchKeys = e.batchKeys[:0]
		e.idxValues.Range(func(h ekv.Handle, val interface{}) bool {
			e.idxValues.Delete(h)
			return true
		})
	}
	return nil
}

// buildIndexScan builds an index scan whose start key is placed just past lastIdxKey.
func (e *CleanupIndexInterDirc) buildIndexScan(ctx context.Context, txn ekv.Transaction) (allegrosql.SelectResult, error) {
	posetPosetDagPB, err := e.buildIdxPosetDagPB(txn)
	if err != nil {
		return nil, err
	}
	sc := e.ctx.GetStochastikVars().StmtCtx
	var builder allegrosql.RequestBuilder
	ranges := ranger.FullRange()
	ekvReq, err := builder.SetIndexRanges(sc, e.physicalID, e.index.Meta().ID, ranges).
		SetPosetDagRequest(posetPosetDagPB).
		SetStartTS(txn.StartTS()).
		SetKeepOrder(true).
		SetFromStochastikVars(e.ctx.GetStochastikVars()).
		Build()
	if err != nil {
		return nil, err
	}

	ekvReq.KeyRanges[0].StartKey = ekv.Key(e.lastIdxKey).PrefixNext()
	ekvReq.Concurrency = 1
	result, err := allegrosql.Select(ctx, e.ctx, ekvReq, e.getIdxDefCausTypes(), statistics.NewQueryFeedback(0, nil, 0, false))
	if err != nil {
		return nil, err
	}
	result.Fetch(ctx)
	return result, nil
}

// Open implements the InterlockingDirectorate Open interface.
func (e *CleanupIndexInterDirc) Open(ctx context.Context) error {
	if err := e.baseInterlockingDirectorate.Open(ctx); err != nil {
		return err
	}
	return e.init()
}

// init prepares the scan buffers and the initial lastIdxKey; it is called once in
// Open and again for each partition being cleaned.
func (e *CleanupIndexInterDirc) init() error {
	e.idxChunk = chunk.New(e.getIdxDefCausTypes(), e.initCap, e.maxChunkSize)
	e.idxValues = ekv.NewHandleMap()
	e.batchKeys = make([]ekv.Key, 0, e.batchSize)
	e.idxValsBufs = make([][]types.Causet, e.batchSize)
	sc := e.ctx.GetStochastikVars().StmtCtx
	idxKey, _, err := e.index.GenIndexKey(sc, []types.Causet{{}}, ekv.IntHandle(math.MinInt64), nil)
	if err != nil {
		return err
	}
	e.lastIdxKey = idxKey
	return nil
}

func (e *CleanupIndexInterDirc) buildIdxPosetDagPB(txn ekv.Transaction) (*fidelpb.PosetDagRequest, error) {
	posetPosetDagReq := &fidelpb.PosetDagRequest{}
	posetPosetDagReq.TimeZoneName, posetPosetDagReq.TimeZoneOffset = timeutil.Zone(e.ctx.GetStochastikVars().Location())
	sc := e.ctx.GetStochastikVars().StmtCtx
	posetPosetDagReq.Flags = sc.PushDownFlags()
	for i := range e.defCausumns {
		posetPosetDagReq.OutputOffsets = append(posetPosetDagReq.OutputOffsets, uint32(i))
	}

	execPB := e.constructIndexScanPB()
	posetPosetDagReq.InterlockingDirectorates = append(posetPosetDagReq.InterlockingDirectorates, execPB)
	err := causetembedded.SetPBDeferredCausetsDefaultValue(e.ctx, posetPosetDagReq.InterlockingDirectorates[0].IdxScan.DeferredCausets, e.defCausumns)
	if err != nil {
		return nil, err
	}

	limitInterDirc := e.constructLimitPB()
	posetPosetDagReq.InterlockingDirectorates = append(posetPosetDagReq.InterlockingDirectorates, limitInterDirc)
	allegrosql.SetEncodeType(e.ctx, posetPosetDagReq)
	return posetPosetDagReq, nil
}

func (e *CleanupIndexInterDirc) constructIndexScanPB() *fidelpb.InterlockingDirectorate {
	idxInterDirc := &fidelpb.IndexScan{
		BlockId:                  e.physicalID,
		IndexId:                  e.index.Meta().ID,
		DeferredCausets:          soliton.DeferredCausetsToProto(e.defCausumns, e.causet.Meta().PKIsHandle),
		PrimaryDeferredCausetIds: blocks.TryGetCommonPkDeferredCausetIds(e.causet.Meta()),
	}
	return &fidelpb.InterlockingDirectorate{Tp: fidelpb.InterDircType_TypeIndexScan, IdxScan: idxInterDirc}
}

func (e *CleanupIndexInterDirc) constructLimitPB() *fidelpb.InterlockingDirectorate {
	limitInterDirc := &fidelpb.Limit{
		Limit: e.batchSize,
	}
	return &fidelpb.InterlockingDirectorate{Tp: fidelpb.InterDircType_TypeLimit, Limit: limitInterDirc}
}

// Close implements the InterlockingDirectorate Close interface.
func (e *CleanupIndexInterDirc) Close() error {
	return nil
}