github.com/whtcorpsinc/MilevaDB-Prod@v0.0.0-20211104133533-f57f4be3b597/interlock/builder.go

     1  // Copyright 2020 WHTCORPS INC, Inc.
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
        // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    11  // See the License for the specific language governing permissions and
    12  // limitations under the License.
    13  
    14  package interlock
    15  
    16  import (
    17  	"bytes"
    18  	"context"
    19  	"sort"
    20  	"strings"
    21  	"sync"
    22  	"time"
    23  	"unsafe"
    24  
    25  	"github.com/cznic/mathutil"
    26  	"github.com/whtcorpsinc/BerolinaSQL/allegrosql"
    27  	"github.com/whtcorpsinc/BerolinaSQL/ast"
    28  	"github.com/whtcorpsinc/BerolinaSQL/auth"
    29  	"github.com/whtcorpsinc/BerolinaSQL/perceptron"
    30  	"github.com/whtcorpsinc/ekvproto/pkg/diagnosticspb"
    31  	"github.com/whtcorpsinc/errors"
    32  	"github.com/whtcorpsinc/fidelpb/go-fidelpb"
    33  	"github.com/whtcorpsinc/milevadb/allegrosql"
    34  	"github.com/whtcorpsinc/milevadb/causet"
    35  	"github.com/whtcorpsinc/milevadb/causet/blocks"
    36  	causetembedded "github.com/whtcorpsinc/milevadb/causet/embedded"
    37  	causetutil "github.com/whtcorpsinc/milevadb/causet/soliton"
    38  	"github.com/whtcorpsinc/milevadb/ekv"
    39  	"github.com/whtcorpsinc/milevadb/interlock/aggfuncs"
    40  	"github.com/whtcorpsinc/milevadb/memex"
    41  	"github.com/whtcorpsinc/milevadb/memex/aggregation"
    42  	"github.com/whtcorpsinc/milevadb/metrics"
    43  	"github.com/whtcorpsinc/milevadb/petri"
    44  	"github.com/whtcorpsinc/milevadb/schemareplicant"
    45  	"github.com/whtcorpsinc/milevadb/soliton"
    46  	"github.com/whtcorpsinc/milevadb/soliton/admin"
    47  	"github.com/whtcorpsinc/milevadb/soliton/chunk"
    48  	"github.com/whtcorpsinc/milevadb/soliton/execdetails"
    49  	"github.com/whtcorpsinc/milevadb/soliton/logutil"
    50  	"github.com/whtcorpsinc/milevadb/soliton/ranger"
    51  	"github.com/whtcorpsinc/milevadb/soliton/rowcodec"
    52  	"github.com/whtcorpsinc/milevadb/soliton/timeutil"
    53  	"github.com/whtcorpsinc/milevadb/statistics"
    54  	"github.com/whtcorpsinc/milevadb/stochastikctx"
    55  	"github.com/whtcorpsinc/milevadb/stochastikctx/stmtctx"
    56  	"github.com/whtcorpsinc/milevadb/types"
    57  	"go.uber.org/zap"
    58  )
    59  
    60  var (
    61  	interlockCounterMergeJoinInterDirc                      = metrics.InterlockingDirectorateCounter.WithLabelValues("MergeJoinInterDirc")
    62  	interlockCountHashJoinInterDirc                         = metrics.InterlockingDirectorateCounter.WithLabelValues("HashJoinInterDirc")
    63  	interlockCounterHashAggInterDirc                        = metrics.InterlockingDirectorateCounter.WithLabelValues("HashAggInterDirc")
    64  	interlockStreamAggInterDirc                             = metrics.InterlockingDirectorateCounter.WithLabelValues("StreamAggInterDirc")
    65  	interlockCounterSortInterDirc                           = metrics.InterlockingDirectorateCounter.WithLabelValues("SortInterDirc")
    66  	interlockCounterTopNInterDirc                           = metrics.InterlockingDirectorateCounter.WithLabelValues("TopNInterDirc")
    67  	interlockCounterNestedLoopApplyInterDirc                = metrics.InterlockingDirectorateCounter.WithLabelValues("NestedLoopApplyInterDirc")
    68  	interlockCounterIndexLookUpJoin                         = metrics.InterlockingDirectorateCounter.WithLabelValues("IndexLookUpJoin")
    69  	interlockCounterIndexLookUpInterlockingDirectorate      = metrics.InterlockingDirectorateCounter.WithLabelValues("IndexLookUpInterlockingDirectorate")
    70  	interlockCounterIndexMergeReaderInterlockingDirectorate = metrics.InterlockingDirectorateCounter.WithLabelValues("IndexMergeReaderInterlockingDirectorate")
    71  )
    72  
    73  // interlockBuilder builds an InterlockingDirectorate from a Causet.
    74  // The SchemaReplicant must not change during execution.
    75  type interlockBuilder struct {
    76  	ctx        stochastikctx.Context
    77  	is         schemareplicant.SchemaReplicant
    78  	snapshotTS uint64 // The consistent snapshot timestamp for the interlock to read data.
    79  	err        error  // err is set when an error happens during the InterlockingDirectorate building process.
    80  	hasLock    bool
    81  }
    82  
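        // newInterlockingDirectorateBuilder creates an interlockBuilder bound to the given stochastik
        // context and schemareplicant. A minimal usage sketch (illustrative only; `physicalCauset`
        // is a placeholder for any causetembedded.Causet produced by the planner):
        //
        //	b := newInterlockingDirectorateBuilder(sctx, is)
        //	exec := b.build(physicalCauset)
        //	if b.err != nil {
        //		return b.err // build() records failures in b.err instead of returning them.
        //	}
        //	// exec is now ready to be opened and iterated by the caller.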
    83  func newInterlockingDirectorateBuilder(ctx stochastikctx.Context, is schemareplicant.SchemaReplicant) *interlockBuilder {
    84  	return &interlockBuilder{
    85  		ctx: ctx,
    86  		is:  is,
    87  	}
    88  }
    89  
    90  // MockPhysicalCauset is used to return a specified interlock when building.
    91  // It is mainly used for testing.
    92  type MockPhysicalCauset interface {
    93  	causetembedded.PhysicalCauset
    94  	GetInterlockingDirectorate() InterlockingDirectorate
    95  }
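
        // A hypothetical test double satisfying MockPhysicalCauset (sketch; the type and field
        // names below are assumptions, not part of this package):
        //
        //	type mockCauset struct {
        //		causetembedded.PhysicalCauset
        //		exec InterlockingDirectorate
        //	}
        //
        //	func (m *mockCauset) GetInterlockingDirectorate() InterlockingDirectorate { return m.exec }
        //
        // build() returns the embedded interlock directly through its MockPhysicalCauset branch.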
    96  
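        // build dispatches on the concrete type of the Causet and constructs the corresponding
        // InterlockingDirectorate. On an unknown Causet type or any build failure it records the
        // error in b.err and returns nil, so callers must check b.err after calling build.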
    97  func (b *interlockBuilder) build(p causetembedded.Causet) InterlockingDirectorate {
    98  	switch v := p.(type) {
    99  	case nil:
   100  		return nil
   101  	case *causetembedded.Change:
   102  		return b.buildChange(v)
   103  	case *causetembedded.CheckBlock:
   104  		return b.buildCheckBlock(v)
   105  	case *causetembedded.RecoverIndex:
   106  		return b.buildRecoverIndex(v)
   107  	case *causetembedded.CleanupIndex:
   108  		return b.buildCleanupIndex(v)
   109  	case *causetembedded.ChecHoTTexRange:
   110  		return b.buildChecHoTTexRange(v)
   111  	case *causetembedded.ChecksumBlock:
   112  		return b.buildChecksumBlock(v)
   113  	case *causetembedded.ReloadExprPushdownBlacklist:
   114  		return b.buildReloadExprPushdownBlacklist(v)
   115  	case *causetembedded.ReloadOptMemruleBlacklist:
   116  		return b.buildReloadOptMemruleBlacklist(v)
   117  	case *causetembedded.AdminPlugins:
   118  		return b.buildAdminPlugins(v)
   119  	case *causetembedded.DBS:
   120  		return b.buildDBS(v)
   121  	case *causetembedded.Deallocate:
   122  		return b.buildDeallocate(v)
   123  	case *causetembedded.Delete:
   124  		return b.buildDelete(v)
   125  	case *causetembedded.InterDircute:
   126  		return b.buildInterDircute(v)
   127  	case *causetembedded.Trace:
   128  		return b.buildTrace(v)
   129  	case *causetembedded.Explain:
   130  		return b.buildExplain(v)
   131  	case *causetembedded.PointGetCauset:
   132  		return b.buildPointGet(v)
   133  	case *causetembedded.BatchPointGetCauset:
   134  		return b.buildBatchPointGet(v)
   135  	case *causetembedded.Insert:
   136  		return b.buildInsert(v)
   137  	case *causetembedded.LoadData:
   138  		return b.buildLoadData(v)
   139  	case *causetembedded.LoadStats:
   140  		return b.buildLoadStats(v)
   141  	case *causetembedded.IndexAdvise:
   142  		return b.buildIndexAdvise(v)
   143  	case *causetembedded.PhysicalLimit:
   144  		return b.buildLimit(v)
   145  	case *causetembedded.Prepare:
   146  		return b.buildPrepare(v)
   147  	case *causetembedded.PhysicalLock:
   148  		return b.buildSelectLock(v)
   149  	case *causetembedded.CancelDBSJobs:
   150  		return b.buildCancelDBSJobs(v)
   151  	case *causetembedded.ShowNextEventID:
   152  		return b.buildShowNextEventID(v)
   153  	case *causetembedded.ShowDBS:
   154  		return b.buildShowDBS(v)
   155  	case *causetembedded.PhysicalShowDBSJobs:
   156  		return b.buildShowDBSJobs(v)
   157  	case *causetembedded.ShowDBSJobQueries:
   158  		return b.buildShowDBSJobQueries(v)
   159  	case *causetembedded.ShowSlow:
   160  		return b.buildShowSlow(v)
   161  	case *causetembedded.PhysicalShow:
   162  		return b.buildShow(v)
   163  	case *causetembedded.Simple:
   164  		return b.buildSimple(v)
   165  	case *causetembedded.Set:
   166  		return b.buildSet(v)
   167  	case *causetembedded.SetConfig:
   168  		return b.buildSetConfig(v)
   169  	case *causetembedded.PhysicalSort:
   170  		return b.buildSort(v)
   171  	case *causetembedded.PhysicalTopN:
   172  		return b.buildTopN(v)
   173  	case *causetembedded.PhysicalUnionAll:
   174  		return b.buildUnionAll(v)
   175  	case *causetembedded.UFIDelate:
   176  		return b.buildUFIDelate(v)
   177  	case *causetembedded.PhysicalUnionScan:
   178  		return b.buildUnionScanInterDirc(v)
   179  	case *causetembedded.PhysicalHashJoin:
   180  		return b.buildHashJoin(v)
   181  	case *causetembedded.PhysicalMergeJoin:
   182  		return b.buildMergeJoin(v)
   183  	case *causetembedded.PhysicalIndexJoin:
   184  		return b.buildIndexLookUpJoin(v)
   185  	case *causetembedded.PhysicalIndexMergeJoin:
   186  		return b.buildIndexLookUpMergeJoin(v)
   187  	case *causetembedded.PhysicalIndexHashJoin:
   188  		return b.buildIndexNestedLoopHashJoin(v)
   189  	case *causetembedded.PhysicalSelection:
   190  		return b.buildSelection(v)
   191  	case *causetembedded.PhysicalHashAgg:
   192  		return b.buildHashAgg(v)
   193  	case *causetembedded.PhysicalStreamAgg:
   194  		return b.buildStreamAgg(v)
   195  	case *causetembedded.PhysicalProjection:
   196  		return b.buildProjection(v)
   197  	case *causetembedded.PhysicalMemBlock:
   198  		return b.buildMemBlock(v)
   199  	case *causetembedded.PhysicalBlockDual:
   200  		return b.buildBlockDual(v)
   201  	case *causetembedded.PhysicalApply:
   202  		return b.buildApply(v)
   203  	case *causetembedded.PhysicalMaxOneEvent:
   204  		return b.buildMaxOneEvent(v)
   205  	case *causetembedded.Analyze:
   206  		return b.buildAnalyze(v)
   207  	case *causetembedded.PhysicalBlockReader:
   208  		return b.buildBlockReader(v)
   209  	case *causetembedded.PhysicalIndexReader:
   210  		return b.buildIndexReader(v)
   211  	case *causetembedded.PhysicalIndexLookUpReader:
   212  		return b.buildIndexLookUpReader(v)
   213  	case *causetembedded.PhysicalWindow:
   214  		return b.buildWindow(v)
   215  	case *causetembedded.PhysicalShuffle:
   216  		return b.buildShuffle(v)
   217  	case *causetembedded.PhysicalShuffleDataSourceStub:
   218  		return b.buildShuffleDataSourceStub(v)
   219  	case *causetembedded.ALLEGROSQLBindCauset:
   220  		return b.buildALLEGROSQLBindInterDirc(v)
   221  	case *causetembedded.SplitRegion:
   222  		return b.buildSplitRegion(v)
   223  	case *causetembedded.PhysicalIndexMergeReader:
   224  		return b.buildIndexMergeReader(v)
   225  	case *causetembedded.SelectInto:
   226  		return b.buildSelectInto(v)
   227  	case *causetembedded.AdminShowTelemetry:
   228  		return b.buildAdminShowTelemetry(v)
   229  	case *causetembedded.AdminResetTelemetryID:
   230  		return b.buildAdminResetTelemetryID(v)
   231  	default:
   232  		if mp, ok := p.(MockPhysicalCauset); ok {
   233  			return mp.GetInterlockingDirectorate()
   234  		}
   235  
   236  		b.err = ErrUnknownCauset.GenWithStack("Unknown Causet %T", p)
   237  		return nil
   238  	}
   239  }
   240  
   241  func (b *interlockBuilder) buildCancelDBSJobs(v *causetembedded.CancelDBSJobs) InterlockingDirectorate {
   242  	e := &CancelDBSJobsInterDirc{
   243  		baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID()),
   244  		jobIDs:                      v.JobIDs,
   245  	}
   246  	txn, err := e.ctx.Txn(true)
   247  	if err != nil {
   248  		b.err = err
   249  		return nil
   250  	}
   251  
   252  	e.errs, b.err = admin.CancelJobs(txn, e.jobIDs)
   253  	if b.err != nil {
   254  		return nil
   255  	}
   256  	return e
   257  }
   258  
   259  func (b *interlockBuilder) buildChange(v *causetembedded.Change) InterlockingDirectorate {
   260  	return &ChangeInterDirc{
   261  		baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID()),
   262  		ChangeStmt:                  v.ChangeStmt,
   263  	}
   264  }
   265  
   266  func (b *interlockBuilder) buildShowNextEventID(v *causetembedded.ShowNextEventID) InterlockingDirectorate {
   267  	e := &ShowNextEventIDInterDirc{
   268  		baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID()),
   269  		tblName:                     v.BlockName,
   270  	}
   271  	return e
   272  }
   273  
   274  func (b *interlockBuilder) buildShowDBS(v *causetembedded.ShowDBS) InterlockingDirectorate {
   275  	// We get DBSInfo here because, for InterlockingDirectorates that return a result set,
   276  	// Next will be called after the transaction has been committed.
   277  	// We need the transaction to get DBSInfo.
   278  	e := &ShowDBSInterDirc{
   279  		baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID()),
   280  	}
   281  
   282  	var err error
   283  	tenantManager := petri.GetPetri(e.ctx).DBS().TenantManager()
   284  	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
   285  	e.dbsTenantID, err = tenantManager.GetTenantID(ctx)
   286  	cancel()
   287  	if err != nil {
   288  		b.err = err
   289  		return nil
   290  	}
   291  	txn, err := e.ctx.Txn(true)
   292  	if err != nil {
   293  		b.err = err
   294  		return nil
   295  	}
   296  
   297  	dbsInfo, err := admin.GetDBSInfo(txn)
   298  	if err != nil {
   299  		b.err = err
   300  		return nil
   301  	}
   302  	e.dbsInfo = dbsInfo
   303  	e.selfID = tenantManager.ID()
   304  	return e
   305  }
   306  
   307  func (b *interlockBuilder) buildShowDBSJobs(v *causetembedded.PhysicalShowDBSJobs) InterlockingDirectorate {
   308  	e := &ShowDBSJobsInterDirc{
   309  		jobNumber:                   int(v.JobNumber),
   310  		is:                          b.is,
   311  		baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID()),
   312  	}
   313  	return e
   314  }
   315  
   316  func (b *interlockBuilder) buildShowDBSJobQueries(v *causetembedded.ShowDBSJobQueries) InterlockingDirectorate {
   317  	e := &ShowDBSJobQueriesInterDirc{
   318  		baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID()),
   319  		jobIDs:                      v.JobIDs,
   320  	}
   321  	return e
   322  }
   323  
   324  func (b *interlockBuilder) buildShowSlow(v *causetembedded.ShowSlow) InterlockingDirectorate {
   325  	e := &ShowSlowInterDirc{
   326  		baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID()),
   327  		ShowSlow:                    v.ShowSlow,
   328  	}
   329  	return e
   330  }
   331  
   332  // buildIndexLookUpChecker builds check information for the IndexLookUpReader.
   333  func buildIndexLookUpChecker(b *interlockBuilder, p *causetembedded.PhysicalIndexLookUpReader,
   334  	e *IndexLookUpInterlockingDirectorate) {
   335  	is := p.IndexCausets[0].(*causetembedded.PhysicalIndexScan)
   336  	fullDefCausLen := len(is.Index.DeferredCausets) + len(p.CommonHandleDefCauss)
   337  	if !e.isCommonHandle() {
   338  		fullDefCausLen += 1
   339  	}
   340  	e.posetPosetDagPB.OutputOffsets = make([]uint32, fullDefCausLen)
   341  	for i := 0; i < fullDefCausLen; i++ {
   342  		e.posetPosetDagPB.OutputOffsets[i] = uint32(i)
   343  	}
   344  
   345  	ts := p.BlockCausets[0].(*causetembedded.PhysicalBlockScan)
   346  	e.handleIdx = ts.HandleIdx
   347  
   348  	e.ranges = ranger.FullRange()
   349  
   350  	tps := make([]*types.FieldType, 0, fullDefCausLen)
   351  	for _, defCaus := range is.DeferredCausets {
   352  		tps = append(tps, &defCaus.FieldType)
   353  	}
   354  
   355  	if !e.isCommonHandle() {
   356  		tps = append(tps, types.NewFieldType(allegrosql.TypeLonglong))
   357  	}
   358  
   359  	e.checHoTTexValue = &checHoTTexValue{idxDefCausTps: tps}
   360  
   361  	defCausNames := make([]string, 0, len(is.IdxDefCauss))
   362  	for i := range is.IdxDefCauss {
   363  		defCausNames = append(defCausNames, is.DeferredCausets[i].Name.O)
   364  	}
   365  	if defcaus, missingDefCausName := causet.FindDefCauss(e.causet.DefCauss(), defCausNames, true); missingDefCausName != "" {
   366  		b.err = causetembedded.ErrUnknownDeferredCauset.GenWithStack("Unknown defCausumn %s", missingDefCausName)
   367  	} else {
   368  		e.idxTblDefCauss = defcaus
   369  	}
   370  }
   371  
   372  func (b *interlockBuilder) buildCheckBlock(v *causetembedded.CheckBlock) InterlockingDirectorate {
   373  	readerInterDircs := make([]*IndexLookUpInterlockingDirectorate, 0, len(v.IndexLookUpReaders))
   374  	for _, readerCauset := range v.IndexLookUpReaders {
   375  		readerInterDirc, err := buildNoRangeIndexLookUpReader(b, readerCauset)
   376  		if err != nil {
   377  			b.err = errors.Trace(err)
   378  			return nil
   379  		}
   380  		buildIndexLookUpChecker(b, readerCauset, readerInterDirc)
   381  
   382  		readerInterDircs = append(readerInterDircs, readerInterDirc)
   383  	}
   384  
   385  	e := &CheckBlockInterDirc{
   386  		baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID()),
   387  		dbName:                      v.DBName,
   388  		causet:                      v.Block,
   389  		indexInfos:                  v.IndexInfos,
   390  		is:                          b.is,
   391  		srcs:                        readerInterDircs,
   392  		exitCh:                      make(chan struct{}),
   393  		retCh:                       make(chan error, len(readerInterDircs)),
   394  		checHoTTex:                  v.ChecHoTTex,
   395  	}
   396  	return e
   397  }
   398  
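        // buildIdxDefCaussConcatHandleDefCauss returns the index defCausumns concatenated with the
        // handle defCausumns: the primary-index defCausumns for common-handle blocks, or a single
        // extra int64 handle defCausumn otherwise.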
   399  func buildIdxDefCaussConcatHandleDefCauss(tblInfo *perceptron.BlockInfo, indexInfo *perceptron.IndexInfo) []*perceptron.DeferredCausetInfo {
   400  	handleLen := 1
   401  	var pkDefCauss []*perceptron.IndexDeferredCauset
   402  	if tblInfo.IsCommonHandle {
   403  		pkIdx := blocks.FindPrimaryIndex(tblInfo)
   404  		pkDefCauss = pkIdx.DeferredCausets
   405  		handleLen = len(pkIdx.DeferredCausets)
   406  	}
   407  	defCausumns := make([]*perceptron.DeferredCausetInfo, 0, len(indexInfo.DeferredCausets)+handleLen)
   408  	for _, idxDefCaus := range indexInfo.DeferredCausets {
   409  		defCausumns = append(defCausumns, tblInfo.DeferredCausets[idxDefCaus.Offset])
   410  	}
   411  	if tblInfo.IsCommonHandle {
   412  		for _, c := range pkDefCauss {
   413  			defCausumns = append(defCausumns, tblInfo.DeferredCausets[c.Offset])
   414  		}
   415  		return defCausumns
   416  	}
   417  	handleOffset := len(defCausumns)
   418  	handleDefCaussInfo := &perceptron.DeferredCausetInfo{
   419  		ID:     perceptron.ExtraHandleID,
   420  		Name:   perceptron.ExtraHandleName,
   421  		Offset: handleOffset,
   422  	}
   423  	handleDefCaussInfo.FieldType = *types.NewFieldType(allegrosql.TypeLonglong)
   424  	defCausumns = append(defCausumns, handleDefCaussInfo)
   425  	return defCausumns
   426  }
   427  
   428  func (b *interlockBuilder) buildRecoverIndex(v *causetembedded.RecoverIndex) InterlockingDirectorate {
   429  	tblInfo := v.Block.BlockInfo
   430  	t, err := b.is.BlockByName(v.Block.Schema, tblInfo.Name)
   431  	if err != nil {
   432  		b.err = err
   433  		return nil
   434  	}
   435  	idxName := strings.ToLower(v.IndexName)
   436  	index := blocks.GetWriblockIndexByName(idxName, t)
   437  	if index == nil {
   438  		b.err = errors.Errorf("index `%v` is not found in causet `%v`.", v.IndexName, v.Block.Name.O)
   439  		return nil
   440  	}
   441  	e := &RecoverIndexInterDirc{
   442  		baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID()),
   443  		defCausumns:                 buildIdxDefCaussConcatHandleDefCauss(tblInfo, index.Meta()),
   444  		index:                       index,
   445  		causet:                      t,
   446  		physicalID:                  t.Meta().ID,
   447  	}
   448  	sessCtx := e.ctx.GetStochastikVars().StmtCtx
   449  	e.handleDefCauss = buildHandleDefCaussForInterDirc(sessCtx, tblInfo, index.Meta(), e.defCausumns)
   450  	return e
   451  }
   452  
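        // buildHandleDefCaussForInterDirc builds the HandleDefCauss used by RecoverIndex and
        // CleanupIndex: IntHandleDefCauss pointing at the trailing extra handle defCausumn for
        // int-handle blocks, or CommonHandleDefCauss mapped from the primary index otherwise.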
   453  func buildHandleDefCaussForInterDirc(sctx *stmtctx.StatementContext, tblInfo *perceptron.BlockInfo,
   454  	idxInfo *perceptron.IndexInfo, allDefCausInfo []*perceptron.DeferredCausetInfo) causetembedded.HandleDefCauss {
   455  	if !tblInfo.IsCommonHandle {
   456  		extraDefCausPos := len(allDefCausInfo) - 1
   457  		intDefCaus := &memex.DeferredCauset{
   458  			Index:   extraDefCausPos,
   459  			RetType: types.NewFieldType(allegrosql.TypeLonglong),
   460  		}
   461  		return causetembedded.NewIntHandleDefCauss(intDefCaus)
   462  	}
   463  	tblDefCauss := make([]*memex.DeferredCauset, len(tblInfo.DeferredCausets))
   464  	for i := 0; i < len(tblInfo.DeferredCausets); i++ {
   465  		c := tblInfo.DeferredCausets[i]
   466  		tblDefCauss[i] = &memex.DeferredCauset{
   467  			RetType: &c.FieldType,
   468  			ID:      c.ID,
   469  		}
   470  	}
   471  	pkIdx := blocks.FindPrimaryIndex(tblInfo)
   472  	for i, c := range pkIdx.DeferredCausets {
   473  		tblDefCauss[c.Offset].Index = len(idxInfo.DeferredCausets) + i
   474  	}
   475  	return causetembedded.NewCommonHandleDefCauss(sctx, tblInfo, pkIdx, tblDefCauss)
   476  }
   477  
   478  func (b *interlockBuilder) buildCleanupIndex(v *causetembedded.CleanupIndex) InterlockingDirectorate {
   479  	tblInfo := v.Block.BlockInfo
   480  	t, err := b.is.BlockByName(v.Block.Schema, tblInfo.Name)
   481  	if err != nil {
   482  		b.err = err
   483  		return nil
   484  	}
   485  	idxName := strings.ToLower(v.IndexName)
   486  	var index causet.Index
   487  	for _, idx := range t.Indices() {
   488  		if idx.Meta().State != perceptron.StatePublic {
   489  			continue
   490  		}
   491  		if idxName == idx.Meta().Name.L {
   492  			index = idx
   493  			break
   494  		}
   495  	}
   496  
   497  	if index == nil {
   498  		b.err = errors.Errorf("index `%v` is not found in causet `%v`.", v.IndexName, v.Block.Name.O)
   499  		return nil
   500  	}
   501  	e := &CleanupIndexInterDirc{
   502  		baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID()),
   503  		defCausumns:                 buildIdxDefCaussConcatHandleDefCauss(tblInfo, index.Meta()),
   504  		index:                       index,
   505  		causet:                      t,
   506  		physicalID:                  t.Meta().ID,
   507  		batchSize:                   20000,
   508  	}
   509  	sessCtx := e.ctx.GetStochastikVars().StmtCtx
   510  	e.handleDefCauss = buildHandleDefCaussForInterDirc(sessCtx, tblInfo, index.Meta(), e.defCausumns)
   511  	return e
   512  }
   513  
   514  func (b *interlockBuilder) buildChecHoTTexRange(v *causetembedded.ChecHoTTexRange) InterlockingDirectorate {
   515  	tb, err := b.is.BlockByName(v.Block.Schema, v.Block.Name)
   516  	if err != nil {
   517  		b.err = err
   518  		return nil
   519  	}
   520  	e := &ChecHoTTexRangeInterDirc{
   521  		baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID()),
   522  		handleRanges:                v.HandleRanges,
   523  		causet:                      tb.Meta(),
   524  		is:                          b.is,
   525  	}
   526  	idxName := strings.ToLower(v.IndexName)
   527  	for _, idx := range tb.Indices() {
   528  		if idx.Meta().Name.L == idxName {
   529  			e.index = idx.Meta()
   530  			e.startKey = make([]types.Causet, len(e.index.DeferredCausets))
   531  			break
   532  		}
   533  	}
   534  	return e
   535  }
   536  
   537  func (b *interlockBuilder) buildChecksumBlock(v *causetembedded.ChecksumBlock) InterlockingDirectorate {
   538  	e := &ChecksumBlockInterDirc{
   539  		baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID()),
   540  		blocks:                      make(map[int64]*checksumContext),
   541  		done:                        false,
   542  	}
   543  	startTs, err := b.getSnapshotTS()
   544  	if err != nil {
   545  		b.err = err
   546  		return nil
   547  	}
   548  	for _, t := range v.Blocks {
   549  		e.blocks[t.BlockInfo.ID] = newChecksumContext(t.DBInfo, t.BlockInfo, startTs)
   550  	}
   551  	return e
   552  }
   553  
   554  func (b *interlockBuilder) buildReloadExprPushdownBlacklist(v *causetembedded.ReloadExprPushdownBlacklist) InterlockingDirectorate {
   555  	return &ReloadExprPushdownBlacklistInterDirc{baseInterlockingDirectorate{ctx: b.ctx}}
   556  }
   557  
   558  func (b *interlockBuilder) buildReloadOptMemruleBlacklist(v *causetembedded.ReloadOptMemruleBlacklist) InterlockingDirectorate {
   559  	return &ReloadOptMemruleBlacklistInterDirc{baseInterlockingDirectorate{ctx: b.ctx}}
   560  }
   561  
   562  func (b *interlockBuilder) buildAdminPlugins(v *causetembedded.AdminPlugins) InterlockingDirectorate {
   563  	return &AdminPluginsInterDirc{baseInterlockingDirectorate: baseInterlockingDirectorate{ctx: b.ctx}, CausetAction: v.CausetAction, Plugins: v.Plugins}
   564  }
   565  
   566  func (b *interlockBuilder) buildDeallocate(v *causetembedded.Deallocate) InterlockingDirectorate {
   567  	base := newBaseInterlockingDirectorate(b.ctx, nil, v.ID())
   568  	base.initCap = chunk.ZeroCapacity
   569  	e := &DeallocateInterDirc{
   570  		baseInterlockingDirectorate: base,
   571  		Name:                        v.Name,
   572  	}
   573  	return e
   574  }
   575  
   576  func (b *interlockBuilder) buildSelectLock(v *causetembedded.PhysicalLock) InterlockingDirectorate {
   577  	b.hasLock = true
   578  	if b.err = b.uFIDelateForUFIDelateTSIfNeeded(v.Children()[0]); b.err != nil {
   579  		return nil
   580  	}
   581  	// Build 'select for uFIDelate' using the 'for uFIDelate' ts.
   582  	b.snapshotTS = b.ctx.GetStochastikVars().TxnCtx.GetForUFIDelateTS()
   583  
   584  	src := b.build(v.Children()[0])
   585  	if b.err != nil {
   586  		return nil
   587  	}
   588  	if !b.ctx.GetStochastikVars().InTxn() {
   589  		// Locking of rows for uFIDelate using SELECT FOR UFIDelATE only applies when autocommit
   590  		// is disabled (either by beginning a transaction with START TRANSACTION or by setting
   591  		// autocommit to 0). If autocommit is enabled, the rows matching the specification are not locked.
   592  		// See https://dev.allegrosql.com/doc/refman/5.7/en/innodb-locking-reads.html
   593  		return src
   594  	}
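        	// For example (illustrative SQL, not taken from this file):
        	//
        	//	SELECT * FROM t WHERE k = 1 FOR UPDATE;          -- autocommit on: rows are not locked, src is returned as-is
        	//	BEGIN; SELECT * FROM t WHERE k = 1 FOR UPDATE;   -- in a transaction: SelectLockInterDirc locks the rows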
   595  	e := &SelectLockInterDirc{
   596  		baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID(), src),
   597  		Lock:                        v.Lock,
   598  		tblID2Handle:                v.TblID2Handle,
   599  		partitionedBlock:            v.PartitionedBlock,
   600  	}
   601  	return e
   602  }
   603  
   604  func (b *interlockBuilder) buildLimit(v *causetembedded.PhysicalLimit) InterlockingDirectorate {
   605  	childInterDirc := b.build(v.Children()[0])
   606  	if b.err != nil {
   607  		return nil
   608  	}
   609  	n := int(mathutil.MinUint64(v.Count, uint64(b.ctx.GetStochastikVars().MaxChunkSize)))
   610  	base := newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID(), childInterDirc)
   611  	base.initCap = n
   612  	e := &LimitInterDirc{
   613  		baseInterlockingDirectorate: base,
   614  		begin:                       v.Offset,
   615  		end:                         v.Offset + v.Count,
   616  	}
   617  	return e
   618  }
   619  
   620  func (b *interlockBuilder) buildPrepare(v *causetembedded.Prepare) InterlockingDirectorate {
   621  	base := newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID())
   622  	base.initCap = chunk.ZeroCapacity
   623  	return &PrepareInterDirc{
   624  		baseInterlockingDirectorate: base,
   625  		is:                          b.is,
   626  		name:                        v.Name,
   627  		sqlText:                     v.ALLEGROSQLText,
   628  	}
   629  }
   630  
   631  func (b *interlockBuilder) buildInterDircute(v *causetembedded.InterDircute) InterlockingDirectorate {
   632  	e := &InterDircuteInterDirc{
   633  		baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID()),
   634  		is:                          b.is,
   635  		name:                        v.Name,
   636  		usingVars:                   v.UsingVars,
   637  		id:                          v.InterDircID,
   638  		stmt:                        v.Stmt,
   639  		plan:                        v.Causet,
   640  		outputNames:                 v.OutputNames(),
   641  	}
   642  	return e
   643  }
   644  
   645  func (b *interlockBuilder) buildShow(v *causetembedded.PhysicalShow) InterlockingDirectorate {
   646  	e := &ShowInterDirc{
   647  		baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID()),
   648  		Tp:                          v.Tp,
   649  		DBName:                      perceptron.NewCIStr(v.DBName),
   650  		Block:                       v.Block,
   651  		DeferredCauset:              v.DeferredCauset,
   652  		IndexName:                   v.IndexName,
   653  		Flag:                        v.Flag,
   654  		Roles:                       v.Roles,
   655  		User:                        v.User,
   656  		is:                          b.is,
   657  		Full:                        v.Full,
   658  		IfNotExists:                 v.IfNotExists,
   659  		GlobalScope:                 v.GlobalScope,
   660  		Extended:                    v.Extended,
   661  	}
   662  	if e.Tp == ast.ShowGrants && e.User == nil {
   663  		// The input is a "show grants" memex, fill in the user and roles fields.
   664  		// Note: the results of "show grants" differ from "show grants for current_user":
   665  		// the former determines privileges with roles, while the latter doesn't.
   666  		vars := e.ctx.GetStochastikVars()
   667  		e.User = &auth.UserIdentity{Username: vars.User.AuthUsername, Hostname: vars.User.AuthHostname}
   668  		e.Roles = vars.ActiveRoles
   669  	}
   670  	if e.Tp == ast.ShowMasterStatus {
   671  		// show master status needs a start ts.
   672  		if _, err := e.ctx.Txn(true); err != nil {
   673  			b.err = err
   674  		}
   675  	}
   676  	return e
   677  }
   678  
   679  func (b *interlockBuilder) buildSimple(v *causetembedded.Simple) InterlockingDirectorate {
   680  	switch s := v.Statement.(type) {
   681  	case *ast.GrantStmt:
   682  		return b.buildGrant(s)
   683  	case *ast.RevokeStmt:
   684  		return b.buildRevoke(s)
   685  	case *ast.BRIEStmt:
   686  		return b.buildBRIE(s, v.Schema())
   687  	}
   688  	base := newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID())
   689  	base.initCap = chunk.ZeroCapacity
   690  	e := &SimpleInterDirc{
   691  		baseInterlockingDirectorate: base,
   692  		Statement:                   v.Statement,
   693  		is:                          b.is,
   694  	}
   695  	return e
   696  }
   697  
   698  func (b *interlockBuilder) buildSet(v *causetembedded.Set) InterlockingDirectorate {
   699  	base := newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID())
   700  	base.initCap = chunk.ZeroCapacity
   701  	e := &SetInterlockingDirectorate{
   702  		baseInterlockingDirectorate: base,
   703  		vars:                        v.VarAssigns,
   704  	}
   705  	return e
   706  }
   707  
   708  func (b *interlockBuilder) buildSetConfig(v *causetembedded.SetConfig) InterlockingDirectorate {
   709  	return &SetConfigInterDirc{
   710  		baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID()),
   711  		p:                           v,
   712  	}
   713  }
   714  
   715  func (b *interlockBuilder) buildInsert(v *causetembedded.Insert) InterlockingDirectorate {
   716  	if v.SelectCauset != nil {
   717  		// Try to uFIDelate the forUFIDelateTS for insert/replace into select memexs.
   718  		// Set the selectCauset parameter to nil to make it always uFIDelate the forUFIDelateTS.
   719  		if b.err = b.uFIDelateForUFIDelateTSIfNeeded(nil); b.err != nil {
   720  			return nil
   721  		}
   722  	}
   723  	b.snapshotTS = b.ctx.GetStochastikVars().TxnCtx.GetForUFIDelateTS()
   724  	selectInterDirc := b.build(v.SelectCauset)
   725  	if b.err != nil {
   726  		return nil
   727  	}
   728  	var baseInterDirc baseInterlockingDirectorate
   729  	if selectInterDirc != nil {
   730  		baseInterDirc = newBaseInterlockingDirectorate(b.ctx, nil, v.ID(), selectInterDirc)
   731  	} else {
   732  		baseInterDirc = newBaseInterlockingDirectorate(b.ctx, nil, v.ID())
   733  	}
   734  	baseInterDirc.initCap = chunk.ZeroCapacity
   735  
   736  	ivs := &InsertValues{
   737  		baseInterlockingDirectorate: baseInterDirc,
   738  		Block:                       v.Block,
   739  		DeferredCausets:             v.DeferredCausets,
   740  		Lists:                       v.Lists,
   741  		SetList:                     v.SetList,
   742  		GenExprs:                    v.GenDefCauss.Exprs,
   743  		allAssignmentsAreConstant:   v.AllAssignmentsAreConstant,
   744  		hasRefDefCauss:              v.NeedFillDefaultValue,
   745  		SelectInterDirc:             selectInterDirc,
   746  	}
   747  	err := ivs.initInsertDeferredCausets()
   748  	if err != nil {
   749  		b.err = err
   750  		return nil
   751  	}
   752  
   753  	if v.IsReplace {
   754  		return b.buildReplace(ivs)
   755  	}
   756  	insert := &InsertInterDirc{
   757  		InsertValues: ivs,
   758  		OnDuplicate:  append(v.OnDuplicate, v.GenDefCauss.OnDuplicates...),
   759  	}
   760  	return insert
   761  }
   762  
   763  func (b *interlockBuilder) buildLoadData(v *causetembedded.LoadData) InterlockingDirectorate {
   764  	tbl, ok := b.is.BlockByID(v.Block.BlockInfo.ID)
   765  	if !ok {
   766  		b.err = errors.Errorf("Can not get causet %d", v.Block.BlockInfo.ID)
   767  		return nil
   768  	}
   769  	insertVal := &InsertValues{
   770  		baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, nil, v.ID()),
   771  		Block:                       tbl,
   772  		DeferredCausets:             v.DeferredCausets,
   773  		GenExprs:                    v.GenDefCauss.Exprs,
   774  	}
   775  	loadDataInfo := &LoadDataInfo{
   776  		event:                      make([]types.Causet, 0, len(insertVal.insertDeferredCausets)),
   777  		InsertValues:               insertVal,
   778  		Path:                       v.Path,
   779  		Block:                      tbl,
   780  		FieldsInfo:                 v.FieldsInfo,
   781  		LinesInfo:                  v.LinesInfo,
   782  		IgnoreLines:                v.IgnoreLines,
   783  		DeferredCausetAssignments:  v.DeferredCausetAssignments,
   784  		DeferredCausetsAndUserVars: v.DeferredCausetsAndUserVars,
   785  		Ctx:                        b.ctx,
   786  	}
   787  	defCausumnNames := loadDataInfo.initFieldMappings()
   788  	err := loadDataInfo.initLoadDeferredCausets(defCausumnNames)
   789  	if err != nil {
   790  		b.err = err
   791  		return nil
   792  	}
   793  	loadDataInterDirc := &LoadDataInterDirc{
   794  		baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, nil, v.ID()),
   795  		IsLocal:                     v.IsLocal,
   796  		OnDuplicate:                 v.OnDuplicate,
   797  		loadDataInfo:                loadDataInfo,
   798  	}
   799  	var defaultLoadDataBatchCnt uint64 = 20000 // TODO: this will be changed to a variable in another PR
   800  	loadDataInterDirc.loadDataInfo.InitQueues()
   801  	loadDataInterDirc.loadDataInfo.SetMaxEventsInBatch(defaultLoadDataBatchCnt)
   802  
   803  	return loadDataInterDirc
   804  }
   805  
   806  func (b *interlockBuilder) buildLoadStats(v *causetembedded.LoadStats) InterlockingDirectorate {
   807  	e := &LoadStatsInterDirc{
   808  		baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, nil, v.ID()),
   809  		info:                        &LoadStatsInfo{v.Path, b.ctx},
   810  	}
   811  	return e
   812  }
   813  
   814  func (b *interlockBuilder) buildIndexAdvise(v *causetembedded.IndexAdvise) InterlockingDirectorate {
   815  	e := &IndexAdviseInterDirc{
   816  		baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, nil, v.ID()),
   817  		IsLocal:                     v.IsLocal,
   818  		indexAdviseInfo: &IndexAdviseInfo{
   819  			Path:        v.Path,
   820  			MaxMinutes:  v.MaxMinutes,
   821  			MaxIndexNum: v.MaxIndexNum,
   822  			LinesInfo:   v.LinesInfo,
   823  			Ctx:         b.ctx,
   824  		},
   825  	}
   826  	return e
   827  }
   828  
   829  func (b *interlockBuilder) buildReplace(vals *InsertValues) InterlockingDirectorate {
   830  	replaceInterDirc := &ReplaceInterDirc{
   831  		InsertValues: vals,
   832  	}
   833  	return replaceInterDirc
   834  }
   835  
   836  func (b *interlockBuilder) buildGrant(grant *ast.GrantStmt) InterlockingDirectorate {
   837  	e := &GrantInterDirc{
   838  		baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, nil, 0),
   839  		Privs:                       grant.Privs,
   840  		ObjectType:                  grant.ObjectType,
   841  		Level:                       grant.Level,
   842  		Users:                       grant.Users,
   843  		WithGrant:                   grant.WithGrant,
   844  		TLSOptions:                  grant.TLSOptions,
   845  		is:                          b.is,
   846  	}
   847  	return e
   848  }
   849  
   850  func (b *interlockBuilder) buildRevoke(revoke *ast.RevokeStmt) InterlockingDirectorate {
   851  	e := &RevokeInterDirc{
   852  		baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, nil, 0),
   853  		ctx:                         b.ctx,
   854  		Privs:                       revoke.Privs,
   855  		ObjectType:                  revoke.ObjectType,
   856  		Level:                       revoke.Level,
   857  		Users:                       revoke.Users,
   858  		is:                          b.is,
   859  	}
   860  	return e
   861  }
   862  
   863  func (b *interlockBuilder) buildDBS(v *causetembedded.DBS) InterlockingDirectorate {
   864  	e := &DBSInterDirc{
   865  		baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID()),
   866  		stmt:                        v.Statement,
   867  		is:                          b.is,
   868  	}
   869  	return e
   870  }
   871  
   872  // buildTrace builds a TraceInterDirc for future execution. This method will be called
   873  // at build().
   874  func (b *interlockBuilder) buildTrace(v *causetembedded.Trace) InterlockingDirectorate {
   875  	t := &TraceInterDirc{
   876  		baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID()),
   877  		stmtNode:                    v.StmtNode,
   878  		builder:                     b,
   879  		format:                      v.Format,
   880  	}
   881  	if t.format == causetembedded.TraceFormatLog {
   882  		return &SortInterDirc{
   883  			baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID(), t),
   884  			ByItems: []*causetutil.ByItems{
   885  				{Expr: &memex.DeferredCauset{
   886  					Index:   0,
   887  					RetType: types.NewFieldType(allegrosql.TypeTimestamp),
   888  				}},
   889  			},
   890  			schemaReplicant: v.Schema(),
   891  		}
   892  	}
   893  	return t
   894  }
   895  
   896  // buildExplain builds an explain interlock. `e.rows` defCauslects the final result for `ExplainInterDirc`.
   897  func (b *interlockBuilder) buildExplain(v *causetembedded.Explain) InterlockingDirectorate {
   898  	explainInterDirc := &ExplainInterDirc{
   899  		baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID()),
   900  		explain:                     v,
   901  	}
   902  	if v.Analyze {
   903  		if b.ctx.GetStochastikVars().StmtCtx.RuntimeStatsDefCausl == nil {
   904  			b.ctx.GetStochastikVars().StmtCtx.RuntimeStatsDefCausl = execdetails.NewRuntimeStatsDefCausl()
   905  		}
   906  		explainInterDirc.analyzeInterDirc = b.build(v.TargetCauset)
   907  	}
   908  	return explainInterDirc
   909  }
   910  
   911  func (b *interlockBuilder) buildSelectInto(v *causetembedded.SelectInto) InterlockingDirectorate {
   912  	child := b.build(v.TargetCauset)
   913  	if b.err != nil {
   914  		return nil
   915  	}
   916  	return &SelectIntoInterDirc{
   917  		baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID(), child),
   918  		intoOpt:                     v.IntoOpt,
   919  	}
   920  }
   921  
   922  func (b *interlockBuilder) buildUnionScanInterDirc(v *causetembedded.PhysicalUnionScan) InterlockingDirectorate {
   923  	reader := b.build(v.Children()[0])
   924  	if b.err != nil {
   925  		return nil
   926  	}
   927  
   928  	return b.buildUnionScanFromReader(reader, v)
   929  }
   930  
   931  // buildUnionScanFromReader builds a union scan interlock from the child interlock.
   932  // Note that this function may be called concurrently by inner workers of an index lookup join.
   933  // Be careful to avoid data races.
   934  func (b *interlockBuilder) buildUnionScanFromReader(reader InterlockingDirectorate, v *causetembedded.PhysicalUnionScan) InterlockingDirectorate {
   935  	// Adjust UnionScan->PartitionBlock->Reader
   936  	// to PartitionBlock->UnionScan->Reader
   937  	// The build of the UnionScan interlock is delayed to the nextPartition() function
   938  	// because the Reader interlock is available there.
   939  	if x, ok := reader.(*PartitionBlockInterlockingDirectorate); ok {
   940  		nextPartitionForReader := x.nextPartition
   941  		x.nextPartition = nextPartitionForUnionScan{
   942  			b:     b,
   943  			us:    v,
   944  			child: nextPartitionForReader,
   945  		}
   946  		return x
   947  	}
   948  	// If the reader is a union, it means a partitioned causet and we should transform it as above.
   949  	if x, ok := reader.(*UnionInterDirc); ok {
   950  		for i, child := range x.children {
   951  			x.children[i] = b.buildUnionScanFromReader(child, v)
   952  			if b.err != nil {
   953  				return nil
   954  			}
   955  		}
   956  		return x
   957  	}
   958  	us := &UnionScanInterDirc{baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID(), reader)}
   959  	// Get the handle defCausumns of the Causet below.
   960  	us.belowHandleDefCauss = v.HandleDefCauss
   961  	us.mublockEvent = chunk.MutEventFromTypes(retTypes(us))
   962  
   963  	// If the pushed-down condition contains a virtual defCausumn, a selection may have been built upon the reader.
   964  	originReader := reader
   965  	if sel, ok := reader.(*SelectionInterDirc); ok {
   966  		reader = sel.children[0]
   967  	}
   968  
   969  	switch x := reader.(type) {
   970  	case *BlockReaderInterlockingDirectorate:
   971  		us.desc = x.desc
   972  		// Union scan can only be in a write transaction, so DirtyDB should have a non-nil value now, thus
   973  		// GetDirtyDB() is safe here. If this causet has been modified in the transaction, a non-nil DirtyBlock
   974  		// can be found in DirtyDB now, so GetDirtyBlock is safe; if this causet has not been modified in the
   975  		// transaction, an empty DirtyBlock would be inserted into DirtyDB, and it does not matter when multiple
   976  		// goroutines write empty DirtyBlocks to DirtyDB for this causet concurrently. Although the DirtyDB looks
   977  		// safe from data races in all these cases, a Go map will panic when it is accessed in parallel.
   978  		// So we dagger it when getting the dirty causet.
   979  		us.conditions, us.conditionsWithVirDefCaus = causetembedded.SplitSelCondsWithVirtualDeferredCauset(v.Conditions)
   980  		us.defCausumns = x.defCausumns
   981  		us.causet = x.causet
   982  		us.virtualDeferredCausetIndex = x.virtualDeferredCausetIndex
   983  	case *IndexReaderInterlockingDirectorate:
   984  		us.desc = x.desc
   985  		for _, ic := range x.index.DeferredCausets {
   986  			for i, defCaus := range x.defCausumns {
   987  				if defCaus.Name.L == ic.Name.L {
   988  					us.usedIndex = append(us.usedIndex, i)
   989  					break
   990  				}
   991  			}
   992  		}
   993  		us.conditions, us.conditionsWithVirDefCaus = causetembedded.SplitSelCondsWithVirtualDeferredCauset(v.Conditions)
   994  		us.defCausumns = x.defCausumns
   995  		us.causet = x.causet
   996  	case *IndexLookUpInterlockingDirectorate:
   997  		us.desc = x.desc
   998  		for _, ic := range x.index.DeferredCausets {
   999  			for i, defCaus := range x.defCausumns {
  1000  				if defCaus.Name.L == ic.Name.L {
  1001  					us.usedIndex = append(us.usedIndex, i)
  1002  					break
  1003  				}
  1004  			}
  1005  		}
  1006  		us.conditions, us.conditionsWithVirDefCaus = causetembedded.SplitSelCondsWithVirtualDeferredCauset(v.Conditions)
  1007  		us.defCausumns = x.defCausumns
  1008  		us.causet = x.causet
  1009  		us.virtualDeferredCausetIndex = buildVirtualDeferredCausetIndex(us.Schema(), us.defCausumns)
  1010  	default:
  1011  		// The mem causet will not be written by allegrosql directly, so we can omit the union scan to avoid err reporting.
  1012  		return originReader
  1013  	}
  1014  	return us
  1015  }
  1016  
  1017  // buildMergeJoin builds MergeJoinInterDirc interlock.
  1018  func (b *interlockBuilder) buildMergeJoin(v *causetembedded.PhysicalMergeJoin) InterlockingDirectorate {
  1019  	leftInterDirc := b.build(v.Children()[0])
  1020  	if b.err != nil {
  1021  		return nil
  1022  	}
  1023  
  1024  	rightInterDirc := b.build(v.Children()[1])
  1025  	if b.err != nil {
  1026  		return nil
  1027  	}
  1028  
  1029  	defaultValues := v.DefaultValues
  1030  	if defaultValues == nil {
  1031  		if v.JoinType == causetembedded.RightOuterJoin {
  1032  			defaultValues = make([]types.Causet, leftInterDirc.Schema().Len())
  1033  		} else {
  1034  			defaultValues = make([]types.Causet, rightInterDirc.Schema().Len())
  1035  		}
  1036  	}
  1037  
  1038  	e := &MergeJoinInterDirc{
  1039  		stmtCtx:                     b.ctx.GetStochastikVars().StmtCtx,
  1040  		baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID(), leftInterDirc, rightInterDirc),
  1041  		compareFuncs:                v.CompareFuncs,
  1042  		joiner: newJoiner(
  1043  			b.ctx,
  1044  			v.JoinType,
  1045  			v.JoinType == causetembedded.RightOuterJoin,
  1046  			defaultValues,
  1047  			v.OtherConditions,
  1048  			retTypes(leftInterDirc),
  1049  			retTypes(rightInterDirc),
  1050  			markChildrenUsedDefCauss(v.Schema(), v.Children()[0].Schema(), v.Children()[1].Schema()),
  1051  		),
  1052  		isOuterJoin: v.JoinType.IsOuterJoin(),
  1053  		desc:        v.Desc,
  1054  	}
  1055  
  1056  	leftBlock := &mergeJoinBlock{
  1057  		childIndex: 0,
  1058  		joinKeys:   v.LeftJoinKeys,
  1059  		filters:    v.LeftConditions,
  1060  	}
  1061  	rightBlock := &mergeJoinBlock{
  1062  		childIndex: 1,
  1063  		joinKeys:   v.RightJoinKeys,
  1064  		filters:    v.RightConditions,
  1065  	}
  1066  
  1067  	if v.JoinType == causetembedded.RightOuterJoin {
  1068  		e.innerBlock = leftBlock
  1069  		e.outerBlock = rightBlock
  1070  	} else {
  1071  		e.innerBlock = rightBlock
  1072  		e.outerBlock = leftBlock
  1073  	}
  1074  	e.innerBlock.isInner = true
  1075  
  1076  	// The optimizer should guarantee that filters on the inner causet are pushed down
  1077  	// to einsteindb or extracted into a Selection.
  1078  	if len(e.innerBlock.filters) != 0 {
  1079  		b.err = errors.Annotate(ErrBuildInterlockingDirectorate, "merge join's inner filter should be empty.")
  1080  		return nil
  1081  	}
  1082  
  1083  	interlockCounterMergeJoinInterDirc.Inc()
  1084  	return e
  1085  }
  1086  
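        // buildSideEstCount returns the estimated event count of the hash join's build side, or
        // 0.0 when the build side has no histogram defCauslection or only pseudo statistics.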
  1087  func (b *interlockBuilder) buildSideEstCount(v *causetembedded.PhysicalHashJoin) float64 {
  1088  	buildSide := v.Children()[v.InnerChildIdx]
  1089  	if v.UseOuterToBuild {
  1090  		buildSide = v.Children()[1-v.InnerChildIdx]
  1091  	}
  1092  	if buildSide.Stats().HistDefCausl == nil || buildSide.Stats().HistDefCausl.Pseudo {
  1093  		return 0.0
  1094  	}
  1095  	return buildSide.StatsCount()
  1096  }
  1097  
  1098  func (b *interlockBuilder) buildHashJoin(v *causetembedded.PhysicalHashJoin) InterlockingDirectorate {
  1099  	leftInterDirc := b.build(v.Children()[0])
  1100  	if b.err != nil {
  1101  		return nil
  1102  	}
  1103  
  1104  	rightInterDirc := b.build(v.Children()[1])
  1105  	if b.err != nil {
  1106  		return nil
  1107  	}
  1108  
  1109  	e := &HashJoinInterDirc{
  1110  		baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID(), leftInterDirc, rightInterDirc),
  1111  		concurrency:                 v.Concurrency,
  1112  		joinType:                    v.JoinType,
  1113  		isOuterJoin:                 v.JoinType.IsOuterJoin(),
  1114  		useOuterToBuild:             v.UseOuterToBuild,
  1115  	}
  1116  	defaultValues := v.DefaultValues
  1117  	lhsTypes, rhsTypes := retTypes(leftInterDirc), retTypes(rightInterDirc)
  1118  	if v.InnerChildIdx == 1 {
  1119  		if len(v.RightConditions) > 0 {
  1120  			b.err = errors.Annotate(ErrBuildInterlockingDirectorate, "join's inner condition should be empty")
  1121  			return nil
  1122  		}
  1123  	} else {
  1124  		if len(v.LeftConditions) > 0 {
  1125  			b.err = errors.Annotate(ErrBuildInterlockingDirectorate, "join's inner condition should be empty")
  1126  			return nil
  1127  		}
  1128  	}
  1129  
  1130  	// consider defCauslations
  1131  	leftTypes := make([]*types.FieldType, 0, len(retTypes(leftInterDirc)))
  1132  	for _, tp := range retTypes(leftInterDirc) {
  1133  		leftTypes = append(leftTypes, tp.Clone())
  1134  	}
  1135  	rightTypes := make([]*types.FieldType, 0, len(retTypes(rightInterDirc)))
  1136  	for _, tp := range retTypes(rightInterDirc) {
  1137  		rightTypes = append(rightTypes, tp.Clone())
  1138  	}
  1139  	leftIsBuildSide := true
  1140  
  1141  	e.isNullEQ = v.IsNullEQ
  1142  	if v.UseOuterToBuild {
  1143  		// uFIDelate the buildSideEstCount due to changing the build side
  1144  		if v.InnerChildIdx == 1 {
  1145  			e.buildSideInterDirc, e.buildKeys = leftInterDirc, v.LeftJoinKeys
  1146  			e.probeSideInterDirc, e.probeKeys = rightInterDirc, v.RightJoinKeys
  1147  			e.outerFilter = v.LeftConditions
  1148  		} else {
  1149  			e.buildSideInterDirc, e.buildKeys = rightInterDirc, v.RightJoinKeys
  1150  			e.probeSideInterDirc, e.probeKeys = leftInterDirc, v.LeftJoinKeys
  1151  			e.outerFilter = v.RightConditions
  1152  			leftIsBuildSide = false
  1153  		}
  1154  		if defaultValues == nil {
  1155  			defaultValues = make([]types.Causet, e.probeSideInterDirc.Schema().Len())
  1156  		}
  1157  	} else {
  1158  		if v.InnerChildIdx == 0 {
  1159  			e.buildSideInterDirc, e.buildKeys = leftInterDirc, v.LeftJoinKeys
  1160  			e.probeSideInterDirc, e.probeKeys = rightInterDirc, v.RightJoinKeys
  1161  			e.outerFilter = v.RightConditions
  1162  		} else {
  1163  			e.buildSideInterDirc, e.buildKeys = rightInterDirc, v.RightJoinKeys
  1164  			e.probeSideInterDirc, e.probeKeys = leftInterDirc, v.LeftJoinKeys
  1165  			e.outerFilter = v.LeftConditions
  1166  			leftIsBuildSide = false
  1167  		}
  1168  		if defaultValues == nil {
  1169  			defaultValues = make([]types.Causet, e.buildSideInterDirc.Schema().Len())
  1170  		}
  1171  	}
  1172  	e.buildSideEstCount = b.buildSideEstCount(v)
  1173  	childrenUsedSchema := markChildrenUsedDefCauss(v.Schema(), v.Children()[0].Schema(), v.Children()[1].Schema())
  1174  	e.joiners = make([]joiner, e.concurrency)
  1175  	for i := uint(0); i < e.concurrency; i++ {
  1176  		e.joiners[i] = newJoiner(b.ctx, v.JoinType, v.InnerChildIdx == 0, defaultValues,
  1177  			v.OtherConditions, lhsTypes, rhsTypes, childrenUsedSchema)
  1178  	}
  1179  	interlockCountHashJoinInterDirc.Inc()
  1180  
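        	// Unify the charset/defCauslation of each join key pair on both sides so that hashing
        	// and comparison of the keys use a consistent defCauslation.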
  1181  	for i := range v.EqualConditions {
  1182  		chs, defCausl := v.EqualConditions[i].CharsetAndDefCauslation(e.ctx)
  1183  		bt := leftTypes[v.LeftJoinKeys[i].Index]
  1184  		bt.Charset, bt.DefCauslate = chs, defCausl
  1185  		pt := rightTypes[v.RightJoinKeys[i].Index]
  1186  		pt.Charset, pt.DefCauslate = chs, defCausl
  1187  	}
  1188  	if leftIsBuildSide {
  1189  		e.buildTypes, e.probeTypes = leftTypes, rightTypes
  1190  	} else {
  1191  		e.buildTypes, e.probeTypes = rightTypes, leftTypes
  1192  	}
  1193  	return e
  1194  }
  1195  
  1196  func (b *interlockBuilder) buildHashAgg(v *causetembedded.PhysicalHashAgg) InterlockingDirectorate {
  1197  	src := b.build(v.Children()[0])
  1198  	if b.err != nil {
  1199  		return nil
  1200  	}
  1201  	stochastikVars := b.ctx.GetStochastikVars()
  1202  	e := &HashAggInterDirc{
  1203  		baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID(), src),
  1204  		sc:                          stochastikVars.StmtCtx,
  1205  		PartialAggFuncs:             make([]aggfuncs.AggFunc, 0, len(v.AggFuncs)),
  1206  		GroupByItems:                v.GroupByItems,
  1207  	}
  1208  	// We take `create causet t(a int, b int);` as an example.
  1209  	//
  1210  	// 1. If all the aggregation functions are FIRST_ROW, we do not need to set the defaultVal for them:
  1211  	// e.g.
  1212  	// allegrosql> select distinct a, b from t;
  1213  	// 0 rows in set (0.00 sec)
  1214  	//
  1215  	// 2. If there exists group by items, we do not need to set the defaultVal for them either:
  1216  	// e.g.
  1217  	// allegrosql> select avg(a) from t group by b;
  1218  	// Empty set (0.00 sec)
  1219  	//
  1220  	// allegrosql> select avg(a) from t group by a;
  1221  	// +--------+
  1222  	// | avg(a) |
  1223  	// +--------+
  1224  	// |  NULL  |
  1225  	// +--------+
  1226  	// 1 event in set (0.00 sec)
  1227  	if len(v.GroupByItems) != 0 || aggregation.IsAllFirstEvent(v.AggFuncs) {
  1228  		e.defaultVal = nil
  1229  	} else {
  1230  		e.defaultVal = chunk.NewChunkWithCapacity(retTypes(e), 1)
  1231  	}
  1232  	for _, aggDesc := range v.AggFuncs {
  1233  		if aggDesc.HasDistinct || len(aggDesc.OrderByItems) > 0 {
  1234  			e.isUnparallelInterDirc = true
  1235  		}
  1236  	}
  1237  	// When we set both milevadb_hashagg_final_concurrency and milevadb_hashagg_partial_concurrency to 1,
  1238  	// we do not need to execute hash agg in parallel,
  1239  	// and this can serve as a workaround when the parallel execution hits unexpected issues.
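        	// For example, an operator could force the unparallel path like this (illustrative SQL;
        	// the exact variable syntax may differ):
        	//
        	//	SET milevadb_hashagg_partial_concurrency = 1;
        	//	SET milevadb_hashagg_final_concurrency = 1;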
  1240  	if finalCon, partialCon := stochastikVars.HashAggFinalConcurrency(), stochastikVars.HashAggPartialConcurrency(); finalCon <= 0 || partialCon <= 0 || finalCon == 1 && partialCon == 1 {
  1241  		e.isUnparallelInterDirc = true
  1242  	}
  1243  	partialOrdinal := 0
  1244  	for i, aggDesc := range v.AggFuncs {
  1245  		if e.isUnparallelInterDirc {
  1246  			e.PartialAggFuncs = append(e.PartialAggFuncs, aggfuncs.Build(b.ctx, aggDesc, i))
  1247  		} else {
  1248  			ordinal := []int{partialOrdinal}
  1249  			partialOrdinal++
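        			// avg is split into two partial results (count and sum), so it takes an extra partial ordinal.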
  1250  			if aggDesc.Name == ast.AggFuncAvg {
  1251  				ordinal = append(ordinal, partialOrdinal+1)
  1252  				partialOrdinal++
  1253  			}
  1254  			partialAggDesc, finalDesc := aggDesc.Split(ordinal)
  1255  			partialAggFunc := aggfuncs.Build(b.ctx, partialAggDesc, i)
  1256  			finalAggFunc := aggfuncs.Build(b.ctx, finalDesc, i)
  1257  			e.PartialAggFuncs = append(e.PartialAggFuncs, partialAggFunc)
  1258  			e.FinalAggFuncs = append(e.FinalAggFuncs, finalAggFunc)
  1259  			if partialAggDesc.Name == ast.AggFuncGroupConcat {
  1260  				// For group_concat, finalAggFunc and partialAggFunc need to share the `truncate` flag so that
  1261  				// truncation is handled consistently between the partial and final stages.
  1261  				finalAggFunc.(interface{ SetTruncated(t *int32) }).SetTruncated(
  1262  					partialAggFunc.(interface{ GetTruncated() *int32 }).GetTruncated(),
  1263  				)
  1264  			}
  1265  		}
  1266  		if e.defaultVal != nil {
  1267  			value := aggDesc.GetDefaultValue()
  1268  			e.defaultVal.AppendCauset(i, &value)
  1269  		}
  1270  	}
  1271  
  1272  	interlockCounterHashAggInterDirc.Inc()
  1273  	return e
  1274  }
  1275  
  1276  func (b *interlockBuilder) buildStreamAgg(v *causetembedded.PhysicalStreamAgg) InterlockingDirectorate {
  1277  	src := b.build(v.Children()[0])
  1278  	if b.err != nil {
  1279  		return nil
  1280  	}
  1281  	e := &StreamAggInterDirc{
  1282  		baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID(), src),
  1283  		groupChecker:                newVecGroupChecker(b.ctx, v.GroupByItems),
  1284  		aggFuncs:                    make([]aggfuncs.AggFunc, 0, len(v.AggFuncs)),
  1285  	}
  1286  	if len(v.GroupByItems) != 0 || aggregation.IsAllFirstEvent(v.AggFuncs) {
  1287  		e.defaultVal = nil
  1288  	} else {
  1289  		e.defaultVal = chunk.NewChunkWithCapacity(retTypes(e), 1)
  1290  	}
  1291  	for i, aggDesc := range v.AggFuncs {
  1292  		aggFunc := aggfuncs.Build(b.ctx, aggDesc, i)
  1293  		e.aggFuncs = append(e.aggFuncs, aggFunc)
  1294  		if e.defaultVal != nil {
  1295  			value := aggDesc.GetDefaultValue()
  1296  			e.defaultVal.AppendCauset(i, &value)
  1297  		}
  1298  	}
  1299  
  1300  	interlockStreamAggInterDirc.Inc()
  1301  	return e
  1302  }
  1303  
  1304  func (b *interlockBuilder) buildSelection(v *causetembedded.PhysicalSelection) InterlockingDirectorate {
  1305  	childInterDirc := b.build(v.Children()[0])
  1306  	if b.err != nil {
  1307  		return nil
  1308  	}
  1309  	e := &SelectionInterDirc{
  1310  		baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID(), childInterDirc),
  1311  		filters:                     v.Conditions,
  1312  	}
  1313  	return e
  1314  }
  1315  
  1316  func (b *interlockBuilder) buildProjection(v *causetembedded.PhysicalProjection) InterlockingDirectorate {
  1317  	childInterDirc := b.build(v.Children()[0])
  1318  	if b.err != nil {
  1319  		return nil
  1320  	}
  1321  	e := &ProjectionInterDirc{
  1322  		baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID(), childInterDirc),
  1323  		numWorkers:                  int64(b.ctx.GetStochastikVars().ProjectionConcurrency()),
  1324  		evaluatorSuit:               memex.NewEvaluatorSuite(v.Exprs, v.AvoidDeferredCausetEvaluator),
  1325  		calculateNoDelay:            v.CalculateNoDelay,
  1326  	}
  1327  
  1328  	// If the estimated event count for this Projection operator is smaller
  1329  	// than one Chunk size, we fall back to the unparallel Projection
  1330  	// implementation to reduce the goroutine overhead.
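        	// For example, with MaxChunkSize = 1024 and an estimated event count of 100, numWorkers is
        	// reset to 0 below, which selects the unparallel implementation.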
  1331  	if int64(v.StatsCount()) < int64(b.ctx.GetStochastikVars().MaxChunkSize) {
  1332  		e.numWorkers = 0
  1333  	}
  1334  	return e
  1335  }
  1336  
  1337  func (b *interlockBuilder) buildBlockDual(v *causetembedded.PhysicalBlockDual) InterlockingDirectorate {
  1338  	if v.EventCount != 0 && v.EventCount != 1 {
  1339  		b.err = errors.Errorf("buildBlockDual failed, invalid event count for dual causet: %v", v.EventCount)
  1340  		return nil
  1341  	}
  1342  	base := newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID())
  1343  	base.initCap = v.EventCount
  1344  	e := &BlockDualInterDirc{
  1345  		baseInterlockingDirectorate: base,
  1346  		numDualEvents:               v.EventCount,
  1347  	}
  1348  	return e
  1349  }
  1350  
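        // getSnapshotTS returns the cached snapshotTS when it has already been resolved; otherwise it uses
        // the stochastik SnapshotTS, falling back to the active transaction's StartTS, caches the result,
        // and reports an error if no valid timestamp can be obtained.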
  1351  func (b *interlockBuilder) getSnapshotTS() (uint64, error) {
  1352  	if b.snapshotTS != 0 {
  1353  		// Return the cached value.
  1354  		return b.snapshotTS, nil
  1355  	}
  1356  
  1357  	snapshotTS := b.ctx.GetStochastikVars().SnapshotTS
  1358  	txn, err := b.ctx.Txn(true)
  1359  	if err != nil {
  1360  		return 0, err
  1361  	}
  1362  	if snapshotTS == 0 {
  1363  		snapshotTS = txn.StartTS()
  1364  	}
  1365  	b.snapshotTS = snapshotTS
  1366  	if b.snapshotTS == 0 {
  1367  		return 0, errors.Trace(ErrGetStartTS)
  1368  	}
  1369  	return snapshotTS, nil
  1370  }
  1371  
  1372  func (b *interlockBuilder) buildMemBlock(v *causetembedded.PhysicalMemBlock) InterlockingDirectorate {
  1373  	switch v.DBName.L {
  1374  	case soliton.MetricSchemaName.L:
  1375  		return &MemBlockReaderInterDirc{
  1376  			baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID()),
  1377  			causet:                      v.Block,
  1378  			retriever: &MetricRetriever{
  1379  				causet:    v.Block,
  1380  				extractor: v.Extractor.(*causetembedded.MetricBlockExtractor),
  1381  			},
  1382  		}
  1383  	case soliton.InformationSchemaName.L:
  1384  		switch v.Block.Name.L {
  1385  		case strings.ToLower(schemareplicant.BlockClusterConfig):
  1386  			return &MemBlockReaderInterDirc{
  1387  				baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID()),
  1388  				causet:                      v.Block,
  1389  				retriever: &clusterConfigRetriever{
  1390  					extractor: v.Extractor.(*causetembedded.ClusterBlockExtractor),
  1391  				},
  1392  			}
  1393  		case strings.ToLower(schemareplicant.BlockClusterLoad):
  1394  			return &MemBlockReaderInterDirc{
  1395  				baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID()),
  1396  				causet:                      v.Block,
  1397  				retriever: &clusterServerInfoRetriever{
  1398  					extractor:      v.Extractor.(*causetembedded.ClusterBlockExtractor),
  1399  					serverInfoType: diagnosticspb.ServerInfoType_LoadInfo,
  1400  				},
  1401  			}
  1402  		case strings.ToLower(schemareplicant.BlockClusterHardware):
  1403  			return &MemBlockReaderInterDirc{
  1404  				baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID()),
  1405  				causet:                      v.Block,
  1406  				retriever: &clusterServerInfoRetriever{
  1407  					extractor:      v.Extractor.(*causetembedded.ClusterBlockExtractor),
  1408  					serverInfoType: diagnosticspb.ServerInfoType_HardwareInfo,
  1409  				},
  1410  			}
  1411  		case strings.ToLower(schemareplicant.BlockClusterSystemInfo):
  1412  			return &MemBlockReaderInterDirc{
  1413  				baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID()),
  1414  				causet:                      v.Block,
  1415  				retriever: &clusterServerInfoRetriever{
  1416  					extractor:      v.Extractor.(*causetembedded.ClusterBlockExtractor),
  1417  					serverInfoType: diagnosticspb.ServerInfoType_SystemInfo,
  1418  				},
  1419  			}
  1420  		case strings.ToLower(schemareplicant.BlockClusterLog):
  1421  			return &MemBlockReaderInterDirc{
  1422  				baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID()),
  1423  				causet:                      v.Block,
  1424  				retriever: &clusterLogRetriever{
  1425  					extractor: v.Extractor.(*causetembedded.ClusterLogBlockExtractor),
  1426  				},
  1427  			}
  1428  		case strings.ToLower(schemareplicant.BlockInspectionResult):
  1429  			return &MemBlockReaderInterDirc{
  1430  				baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID()),
  1431  				causet:                      v.Block,
  1432  				retriever: &inspectionResultRetriever{
  1433  					extractor: v.Extractor.(*causetembedded.InspectionResultBlockExtractor),
  1434  					timeRange: v.QueryTimeRange,
  1435  				},
  1436  			}
  1437  		case strings.ToLower(schemareplicant.BlockInspectionSummary):
  1438  			return &MemBlockReaderInterDirc{
  1439  				baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID()),
  1440  				causet:                      v.Block,
  1441  				retriever: &inspectionSummaryRetriever{
  1442  					causet:    v.Block,
  1443  					extractor: v.Extractor.(*causetembedded.InspectionSummaryBlockExtractor),
  1444  					timeRange: v.QueryTimeRange,
  1445  				},
  1446  			}
  1447  		case strings.ToLower(schemareplicant.BlockInspectionMemrules):
  1448  			return &MemBlockReaderInterDirc{
  1449  				baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID()),
  1450  				causet:                      v.Block,
  1451  				retriever: &inspectionMemruleRetriever{
  1452  					extractor: v.Extractor.(*causetembedded.InspectionMemruleBlockExtractor),
  1453  				},
  1454  			}
  1455  		case strings.ToLower(schemareplicant.BlockMetricSummary):
  1456  			return &MemBlockReaderInterDirc{
  1457  				baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID()),
  1458  				causet:                      v.Block,
  1459  				retriever: &MetricsSummaryRetriever{
  1460  					causet:    v.Block,
  1461  					extractor: v.Extractor.(*causetembedded.MetricSummaryBlockExtractor),
  1462  					timeRange: v.QueryTimeRange,
  1463  				},
  1464  			}
  1465  		case strings.ToLower(schemareplicant.BlockMetricSummaryByLabel):
  1466  			return &MemBlockReaderInterDirc{
  1467  				baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID()),
  1468  				causet:                      v.Block,
  1469  				retriever: &MetricsSummaryByLabelRetriever{
  1470  					causet:    v.Block,
  1471  					extractor: v.Extractor.(*causetembedded.MetricSummaryBlockExtractor),
  1472  					timeRange: v.QueryTimeRange,
  1473  				},
  1474  			}
  1475  		case strings.ToLower(schemareplicant.BlockSchemata),
  1476  			strings.ToLower(schemareplicant.BlockStatistics),
  1477  			strings.ToLower(schemareplicant.BlockMilevaDBIndexes),
  1478  			strings.ToLower(schemareplicant.BlockViews),
  1479  			strings.ToLower(schemareplicant.BlockBlocks),
  1480  			strings.ToLower(schemareplicant.BlockSequences),
  1481  			strings.ToLower(schemareplicant.BlockPartitions),
  1482  			strings.ToLower(schemareplicant.BlockEngines),
  1483  			strings.ToLower(schemareplicant.BlockDefCauslations),
  1484  			strings.ToLower(schemareplicant.BlockAnalyzeStatus),
  1485  			strings.ToLower(schemareplicant.BlockClusterInfo),
  1486  			strings.ToLower(schemareplicant.BlockProfiling),
  1487  			strings.ToLower(schemareplicant.BlockCharacterSets),
  1488  			strings.ToLower(schemareplicant.BlockKeyDeferredCauset),
  1489  			strings.ToLower(schemareplicant.BlockUserPrivileges),
  1490  			strings.ToLower(schemareplicant.BlockMetricBlocks),
  1491  			strings.ToLower(schemareplicant.BlockDefCauslationCharacterSetApplicability),
  1492  			strings.ToLower(schemareplicant.BlockProcesslist),
  1493  			strings.ToLower(schemareplicant.ClusterBlockProcesslist),
  1494  			strings.ToLower(schemareplicant.BlockEinsteinDBRegionStatus),
  1495  			strings.ToLower(schemareplicant.BlockEinsteinDBRegionPeers),
  1496  			strings.ToLower(schemareplicant.BlockMilevaDBHotRegions),
  1497  			strings.ToLower(schemareplicant.BlockStochastikVar),
  1498  			strings.ToLower(schemareplicant.BlockConstraints),
  1499  			strings.ToLower(schemareplicant.BlockTiFlashReplica),
  1500  			strings.ToLower(schemareplicant.BlockMilevaDBServersInfo),
  1501  			strings.ToLower(schemareplicant.BlockEinsteinDBStoreStatus),
  1502  			strings.ToLower(schemareplicant.BlockStatementsSummary),
  1503  			strings.ToLower(schemareplicant.BlockStatementsSummaryHistory),
  1504  			strings.ToLower(schemareplicant.ClusterBlockStatementsSummary),
  1505  			strings.ToLower(schemareplicant.ClusterBlockStatementsSummaryHistory):
  1506  			return &MemBlockReaderInterDirc{
  1507  				baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID()),
  1508  				causet:                      v.Block,
  1509  				retriever: &memblockRetriever{
  1510  					causet:      v.Block,
  1511  					defCausumns: v.DeferredCausets,
  1512  				},
  1513  			}
  1514  		case strings.ToLower(schemareplicant.BlockDeferredCausets):
  1515  			return &MemBlockReaderInterDirc{
  1516  				baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID()),
  1517  				causet:                      v.Block,
  1518  				retriever: &hugeMemBlockRetriever{
  1519  					causet:      v.Block,
  1520  					defCausumns: v.DeferredCausets,
  1521  				},
  1522  			}
  1523  
  1524  		case strings.ToLower(schemareplicant.BlockSlowQuery), strings.ToLower(schemareplicant.ClusterBlockSlowLog):
  1525  			return &MemBlockReaderInterDirc{
  1526  				baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID()),
  1527  				causet:                      v.Block,
  1528  				retriever: &slowQueryRetriever{
  1529  					causet:         v.Block,
  1530  					outputDefCauss: v.DeferredCausets,
  1531  					extractor:      v.Extractor.(*causetembedded.SlowQueryExtractor),
  1532  				},
  1533  			}
  1534  		case strings.ToLower(schemareplicant.BlockStorageStats):
  1535  			return &MemBlockReaderInterDirc{
  1536  				baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID()),
  1537  				causet:                      v.Block,
  1538  				retriever: &blockStorageStatsRetriever{
  1539  					causet:         v.Block,
  1540  					outputDefCauss: v.DeferredCausets,
  1541  					extractor:      v.Extractor.(*causetembedded.BlockStorageStatsExtractor),
  1542  				},
  1543  			}
  1544  		case strings.ToLower(schemareplicant.BlockDBSJobs):
  1545  			return &DBSJobsReaderInterDirc{
  1546  				baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID()),
  1547  				is:                          b.is,
  1548  			}
  1549  		case strings.ToLower(schemareplicant.BlockTiFlashBlocks),
  1550  			strings.ToLower(schemareplicant.BlockTiFlashSegments):
  1551  			return &MemBlockReaderInterDirc{
  1552  				baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID()),
  1553  				causet:                      v.Block,
  1554  				retriever: &TiFlashSystemBlockRetriever{
  1555  					causet:         v.Block,
  1556  					outputDefCauss: v.DeferredCausets,
  1557  					extractor:      v.Extractor.(*causetembedded.TiFlashSystemBlockExtractor),
  1558  				},
  1559  			}
  1560  		}
  1561  	}
  1562  	tb, _ := b.is.BlockByID(v.Block.ID)
  1563  	return &BlockScanInterDirc{
  1564  		baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID()),
  1565  		t:                           tb,
  1566  		defCausumns:                 v.DeferredCausets,
  1567  	}
  1568  }
  1569  
  1570  func (b *interlockBuilder) buildSort(v *causetembedded.PhysicalSort) InterlockingDirectorate {
  1571  	childInterDirc := b.build(v.Children()[0])
  1572  	if b.err != nil {
  1573  		return nil
  1574  	}
  1575  	sortInterDirc := SortInterDirc{
  1576  		baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID(), childInterDirc),
  1577  		ByItems:                     v.ByItems,
  1578  		schemaReplicant:             v.Schema(),
  1579  	}
  1580  	interlockCounterSortInterDirc.Inc()
  1581  	return &sortInterDirc
  1582  }
  1583  
  1584  func (b *interlockBuilder) buildTopN(v *causetembedded.PhysicalTopN) InterlockingDirectorate {
  1585  	childInterDirc := b.build(v.Children()[0])
  1586  	if b.err != nil {
  1587  		return nil
  1588  	}
  1589  	sortInterDirc := SortInterDirc{
  1590  		baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID(), childInterDirc),
  1591  		ByItems:                     v.ByItems,
  1592  		schemaReplicant:             v.Schema(),
  1593  	}
  1594  	interlockCounterTopNInterDirc.Inc()
  1595  	return &TopNInterDirc{
  1596  		SortInterDirc: sortInterDirc,
  1597  		limit:         &causetembedded.PhysicalLimit{Count: v.Count, Offset: v.Offset},
  1598  	}
  1599  }
  1600  
  1601  func (b *interlockBuilder) buildApply(v *causetembedded.PhysicalApply) InterlockingDirectorate {
  1602  	var (
  1603  		innerCauset causetembedded.PhysicalCauset
  1604  		outerCauset causetembedded.PhysicalCauset
  1605  	)
  1606  	if v.InnerChildIdx == 0 {
  1607  		innerCauset = v.Children()[0]
  1608  		outerCauset = v.Children()[1]
  1609  	} else {
  1610  		innerCauset = v.Children()[1]
  1611  		outerCauset = v.Children()[0]
  1612  	}
  1613  	v.OuterSchema = causetembedded.ExtractCorDeferredCausetsBySchema4PhysicalCauset(innerCauset, outerCauset.Schema())
  1614  	leftChild := b.build(v.Children()[0])
  1615  	if b.err != nil {
  1616  		return nil
  1617  	}
  1618  	rightChild := b.build(v.Children()[1])
  1619  	if b.err != nil {
  1620  		return nil
  1621  	}
  1622  	otherConditions := append(memex.ScalarFuncs2Exprs(v.EqualConditions), v.OtherConditions...)
  1623  	defaultValues := v.DefaultValues
  1624  	if defaultValues == nil {
  1625  		defaultValues = make([]types.Causet, v.Children()[v.InnerChildIdx].Schema().Len())
  1626  	}
  1627  	outerInterDirc, innerInterDirc := leftChild, rightChild
  1628  	outerFilter, innerFilter := v.LeftConditions, v.RightConditions
  1629  	if v.InnerChildIdx == 0 {
  1630  		outerInterDirc, innerInterDirc = rightChild, leftChild
  1631  		outerFilter, innerFilter = v.RightConditions, v.LeftConditions
  1632  	}
  1633  	tupleJoiner := newJoiner(b.ctx, v.JoinType, v.InnerChildIdx == 0,
  1634  		defaultValues, otherConditions, retTypes(leftChild), retTypes(rightChild), nil)
  1635  	serialInterDirc := &NestedLoopApplyInterDirc{
  1636  		baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID(), outerInterDirc, innerInterDirc),
  1637  		innerInterDirc:              innerInterDirc,
  1638  		outerInterDirc:              outerInterDirc,
  1639  		outerFilter:                 outerFilter,
  1640  		innerFilter:                 innerFilter,
  1641  		outer:                       v.JoinType != causetembedded.InnerJoin,
  1642  		joiner:                      tupleJoiner,
  1643  		outerSchema:                 v.OuterSchema,
  1644  		ctx:                         b.ctx,
  1645  		canUseCache:                 v.CanUseCache,
  1646  	}
  1647  	interlockCounterNestedLoopApplyInterDirc.Inc()
  1648  
  1649  	// try parallel mode
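        	// Each worker gets its own cloned inner causet, inner filter and joiner. If cloning or building
        	// any inner interlock fails, the error is swallowed and we fall back to the serial
        	// NestedLoopApplyInterDirc built above.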
  1650  	if v.Concurrency > 1 {
  1651  		innerInterDircs := make([]InterlockingDirectorate, 0, v.Concurrency)
  1652  		innerFilters := make([]memex.CNFExprs, 0, v.Concurrency)
  1653  		corDefCauss := make([][]*memex.CorrelatedDeferredCauset, 0, v.Concurrency)
  1654  		joiners := make([]joiner, 0, v.Concurrency)
  1655  		for i := 0; i < v.Concurrency; i++ {
  1656  			clonedInnerCauset, err := causetembedded.SafeClone(innerCauset)
  1657  			if err != nil {
  1658  				b.err = nil
  1659  				return serialInterDirc
  1660  			}
  1661  			corDefCaus := causetembedded.ExtractCorDeferredCausetsBySchema4PhysicalCauset(clonedInnerCauset, outerCauset.Schema())
  1662  			clonedInnerInterDirc := b.build(clonedInnerCauset)
  1663  			if b.err != nil {
  1664  				b.err = nil
  1665  				return serialInterDirc
  1666  			}
  1667  			innerInterDircs = append(innerInterDircs, clonedInnerInterDirc)
  1668  			corDefCauss = append(corDefCauss, corDefCaus)
  1669  			innerFilters = append(innerFilters, innerFilter.Clone())
  1670  			joiners = append(joiners, newJoiner(b.ctx, v.JoinType, v.InnerChildIdx == 0,
  1671  				defaultValues, otherConditions, retTypes(leftChild), retTypes(rightChild), nil))
  1672  		}
  1673  
  1674  		allInterDircs := append([]InterlockingDirectorate{outerInterDirc}, innerInterDircs...)
  1675  
  1676  		return &ParallelNestedLoopApplyInterDirc{
  1677  			baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID(), allInterDircs...),
  1678  			innerInterDircs:             innerInterDircs,
  1679  			outerInterDirc:              outerInterDirc,
  1680  			outerFilter:                 outerFilter,
  1681  			innerFilter:                 innerFilters,
  1682  			outer:                       v.JoinType != causetembedded.InnerJoin,
  1683  			joiners:                     joiners,
  1684  			corDefCauss:                 corDefCauss,
  1685  			concurrency:                 v.Concurrency,
  1686  			useCache:                    true,
  1687  		}
  1688  	}
  1689  	return serialInterDirc
  1690  }
  1691  
  1692  func (b *interlockBuilder) buildMaxOneEvent(v *causetembedded.PhysicalMaxOneEvent) InterlockingDirectorate {
  1693  	childInterDirc := b.build(v.Children()[0])
  1694  	if b.err != nil {
  1695  		return nil
  1696  	}
  1697  	base := newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID(), childInterDirc)
  1698  	base.initCap = 2
  1699  	base.maxChunkSize = 2
  1700  	e := &MaxOneEventInterDirc{baseInterlockingDirectorate: base}
  1701  	return e
  1702  }
  1703  
  1704  func (b *interlockBuilder) buildUnionAll(v *causetembedded.PhysicalUnionAll) InterlockingDirectorate {
  1705  	childInterDircs := make([]InterlockingDirectorate, len(v.Children()))
  1706  	for i, child := range v.Children() {
  1707  		childInterDircs[i] = b.build(child)
  1708  		if b.err != nil {
  1709  			return nil
  1710  		}
  1711  	}
  1712  	e := &UnionInterDirc{
  1713  		baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID(), childInterDircs...),
  1714  		concurrency:                 b.ctx.GetStochastikVars().UnionConcurrency(),
  1715  	}
  1716  	return e
  1717  }
  1718  
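        // buildHandleDefCaussForSplit builds the handle defCausumns used by the split-region interlocks:
        // for a causet with a common handle it maps the primary-index defCausumns, otherwise it returns a
        // single int64 handle defCausumn.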
  1719  func buildHandleDefCaussForSplit(sc *stmtctx.StatementContext, tbInfo *perceptron.BlockInfo) causetembedded.HandleDefCauss {
  1720  	if tbInfo.IsCommonHandle {
  1721  		primaryIdx := blocks.FindPrimaryIndex(tbInfo)
  1722  		blockDefCauss := make([]*memex.DeferredCauset, len(tbInfo.DeferredCausets))
  1723  		for i, defCaus := range tbInfo.DeferredCausets {
  1724  			blockDefCauss[i] = &memex.DeferredCauset{
  1725  				ID:      defCaus.ID,
  1726  				RetType: &defCaus.FieldType,
  1727  			}
  1728  		}
  1729  		for i, pkDefCaus := range primaryIdx.DeferredCausets {
  1730  			blockDefCauss[pkDefCaus.Offset].Index = i
  1731  		}
  1732  		return causetembedded.NewCommonHandleDefCauss(sc, tbInfo, primaryIdx, blockDefCauss)
  1733  	}
  1734  	intDefCaus := &memex.DeferredCauset{
  1735  		RetType: types.NewFieldType(allegrosql.TypeLonglong),
  1736  	}
  1737  	return causetembedded.NewIntHandleDefCauss(intDefCaus)
  1738  }
  1739  
  1740  func (b *interlockBuilder) buildSplitRegion(v *causetembedded.SplitRegion) InterlockingDirectorate {
  1741  	base := newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID())
  1742  	base.initCap = 1
  1743  	base.maxChunkSize = 1
  1744  	if v.IndexInfo != nil {
  1745  		return &SplitIndexRegionInterDirc{
  1746  			baseInterlockingDirectorate: base,
  1747  			blockInfo:                   v.BlockInfo,
  1748  			partitionNames:              v.PartitionNames,
  1749  			indexInfo:                   v.IndexInfo,
  1750  			lower:                       v.Lower,
  1751  			upper:                       v.Upper,
  1752  			num:                         v.Num,
  1753  			valueLists:                  v.ValueLists,
  1754  		}
  1755  	}
  1756  	handleDefCauss := buildHandleDefCaussForSplit(b.ctx.GetStochastikVars().StmtCtx, v.BlockInfo)
  1757  	if len(v.ValueLists) > 0 {
  1758  		return &SplitBlockRegionInterDirc{
  1759  			baseInterlockingDirectorate: base,
  1760  			blockInfo:                   v.BlockInfo,
  1761  			partitionNames:              v.PartitionNames,
  1762  			handleDefCauss:              handleDefCauss,
  1763  			valueLists:                  v.ValueLists,
  1764  		}
  1765  	}
  1766  	return &SplitBlockRegionInterDirc{
  1767  		baseInterlockingDirectorate: base,
  1768  		blockInfo:                   v.BlockInfo,
  1769  		partitionNames:              v.PartitionNames,
  1770  		handleDefCauss:              handleDefCauss,
  1771  		lower:                       v.Lower,
  1772  		upper:                       v.Upper,
  1773  		num:                         v.Num,
  1774  	}
  1775  }
  1776  
  1777  func (b *interlockBuilder) buildUFIDelate(v *causetembedded.UFIDelate) InterlockingDirectorate {
  1778  	tblID2block := make(map[int64]causet.Block, len(v.TblDefCausPosInfos))
  1779  	for _, info := range v.TblDefCausPosInfos {
  1780  		tbl, _ := b.is.BlockByID(info.TblID)
  1781  		tblID2block[info.TblID] = tbl
  1782  		if len(v.PartitionedBlock) > 0 {
  1783  			// v.PartitionedBlock defCauslects the partitioned causets.
  1784  			// Replace the original causet with the partitioned causet to support partition selection.
  1785  			// e.g. for `uFIDelate t partition (p0, p1)`, when the new values do not belong to the given set p0, p1,
  1786  			// using the causet in v.PartitionedBlock returns a proper error, while using the original causet cannot.
  1787  			for _, p := range v.PartitionedBlock {
  1788  				if info.TblID == p.Meta().ID {
  1789  					tblID2block[info.TblID] = p
  1790  				}
  1791  			}
  1792  		}
  1793  	}
  1794  	if b.err = b.uFIDelateForUFIDelateTSIfNeeded(v.SelectCauset); b.err != nil {
  1795  		return nil
  1796  	}
  1797  	b.snapshotTS = b.ctx.GetStochastikVars().TxnCtx.GetForUFIDelateTS()
  1798  	selInterDirc := b.build(v.SelectCauset)
  1799  	if b.err != nil {
  1800  		return nil
  1801  	}
  1802  	base := newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID(), selInterDirc)
  1803  	base.initCap = chunk.ZeroCapacity
  1804  	uFIDelateInterDirc := &UFIDelateInterDirc{
  1805  		baseInterlockingDirectorate: base,
  1806  		OrderedList:                 v.OrderedList,
  1807  		allAssignmentsAreConstant:   v.AllAssignmentsAreConstant,
  1808  		tblID2block:                 tblID2block,
  1809  		tblDefCausPosInfos:          v.TblDefCausPosInfos,
  1810  	}
  1811  	return uFIDelateInterDirc
  1812  }
  1813  
  1814  func (b *interlockBuilder) buildDelete(v *causetembedded.Delete) InterlockingDirectorate {
  1815  	tblID2block := make(map[int64]causet.Block, len(v.TblDefCausPosInfos))
  1816  	for _, info := range v.TblDefCausPosInfos {
  1817  		tblID2block[info.TblID], _ = b.is.BlockByID(info.TblID)
  1818  	}
  1819  	if b.err = b.uFIDelateForUFIDelateTSIfNeeded(v.SelectCauset); b.err != nil {
  1820  		return nil
  1821  	}
  1822  	b.snapshotTS = b.ctx.GetStochastikVars().TxnCtx.GetForUFIDelateTS()
  1823  	selInterDirc := b.build(v.SelectCauset)
  1824  	if b.err != nil {
  1825  		return nil
  1826  	}
  1827  	base := newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID(), selInterDirc)
  1828  	base.initCap = chunk.ZeroCapacity
  1829  	deleteInterDirc := &DeleteInterDirc{
  1830  		baseInterlockingDirectorate: base,
  1831  		tblID2Block:                 tblID2block,
  1832  		IsMultiBlock:                v.IsMultiBlock,
  1833  		tblDefCausPosInfos:          v.TblDefCausPosInfos,
  1834  	}
  1835  	return deleteInterDirc
  1836  }
  1837  
  1838  // uFIDelateForUFIDelateTSIfNeeded uFIDelates the ForUFIDelateTS of a pessimistic transaction if needed.
  1839  // A PointGet interlock will get a conflict error if the ForUFIDelateTS is older than the latest commitTS,
  1840  // so we do not need to uFIDelate it here, which gives better latency.
  1841  func (b *interlockBuilder) uFIDelateForUFIDelateTSIfNeeded(selectCauset causetembedded.PhysicalCauset) error {
  1842  	txnCtx := b.ctx.GetStochastikVars().TxnCtx
  1843  	if !txnCtx.IsPessimistic {
  1844  		return nil
  1845  	}
  1846  	if _, ok := selectCauset.(*causetembedded.PointGetCauset); ok {
  1847  		return nil
  1848  	}
  1849  	// Activate the invalid txn, use the txn startTS as newForUFIDelateTS
  1850  	txn, err := b.ctx.Txn(false)
  1851  	if err != nil {
  1852  		return err
  1853  	}
  1854  	if !txn.Valid() {
  1855  		_, err := b.ctx.Txn(true)
  1856  		if err != nil {
  1857  			return err
  1858  		}
  1859  		return nil
  1860  	}
  1861  	// A Repeablock Read transaction uses the Read Committed level to read data for writing (insert, uFIDelate, delete, select for uFIDelate),
  1862  	// so we should always uFIDelate/refresh the for-uFIDelate-ts no matter whether the isolation level is RR or RC.
  1863  	if b.ctx.GetStochastikVars().IsPessimisticReadConsistency() {
  1864  		return b.refreshForUFIDelateTSForRC()
  1865  	}
  1866  	return UFIDelateForUFIDelateTS(b.ctx, 0)
  1867  }
  1868  
  1869  // refreshForUFIDelateTSForRC refreshes the for-uFIDelate-ts for reading data at the read consistency level in a pessimistic transaction.
  1870  // It can use the cached tso from the memex future to avoid fetching the tso many times.
  1871  func (b *interlockBuilder) refreshForUFIDelateTSForRC() error {
  1872  	defer func() {
  1873  		b.snapshotTS = b.ctx.GetStochastikVars().TxnCtx.GetForUFIDelateTS()
  1874  	}()
  1875  	future := b.ctx.GetStochastikVars().TxnCtx.GetStmtFutureForRC()
  1876  	if future == nil {
  1877  		return nil
  1878  	}
  1879  	newForUFIDelateTS, waitErr := future.Wait()
  1880  	if waitErr != nil {
  1881  		logutil.BgLogger().Warn("wait tso failed",
  1882  			zap.Uint64("startTS", b.ctx.GetStochastikVars().TxnCtx.StartTS),
  1883  			zap.Error(waitErr))
  1884  	}
  1885  	b.ctx.GetStochastikVars().TxnCtx.SetStmtFutureForRC(nil)
  1886  	// If newForUFIDelateTS is 0, it will force to get a new for-uFIDelate-ts from FIDel.
  1887  	return UFIDelateForUFIDelateTS(b.ctx, newForUFIDelateTS)
  1888  }
  1889  
  1890  func (b *interlockBuilder) buildAnalyzeIndexPushdown(task causetembedded.AnalyzeIndexTask, opts map[ast.AnalyzeOptionType]uint64, autoAnalyze string) *analyzeTask {
  1891  	_, offset := timeutil.Zone(b.ctx.GetStochastikVars().Location())
  1892  	sc := b.ctx.GetStochastikVars().StmtCtx
  1893  	e := &AnalyzeIndexInterDirc{
  1894  		ctx:            b.ctx,
  1895  		blockID:        task.BlockID,
  1896  		isCommonHandle: task.TblInfo.IsCommonHandle,
  1897  		idxInfo:        task.IndexInfo,
  1898  		concurrency:    b.ctx.GetStochastikVars().IndexSerialScanConcurrency(),
  1899  		analyzePB: &fidelpb.AnalyzeReq{
  1900  			Tp:             fidelpb.AnalyzeType_TypeIndex,
  1901  			Flags:          sc.PushDownFlags(),
  1902  			TimeZoneOffset: offset,
  1903  		},
  1904  		opts: opts,
  1905  	}
  1906  	e.analyzePB.IdxReq = &fidelpb.AnalyzeIndexReq{
  1907  		BucketSize:         int64(opts[ast.AnalyzeOptNumBuckets]),
  1908  		NumDeferredCausets: int32(len(task.IndexInfo.DeferredCausets)),
  1909  	}
  1910  	if e.isCommonHandle && e.idxInfo.Primary {
  1911  		e.analyzePB.Tp = fidelpb.AnalyzeType_TypeCommonHandle
  1912  	}
  1913  	depth := int32(opts[ast.AnalyzeOptCMSketchDepth])
  1914  	width := int32(opts[ast.AnalyzeOptCMSketchWidth])
  1915  	e.analyzePB.IdxReq.CmsketchDepth = &depth
  1916  	e.analyzePB.IdxReq.CmsketchWidth = &width
  1917  	job := &statistics.AnalyzeJob{DBName: task.DBName, BlockName: task.BlockName, PartitionName: task.PartitionName, JobInfo: autoAnalyze + "analyze index " + task.IndexInfo.Name.O}
  1918  	return &analyzeTask{taskType: idxTask, idxInterDirc: e, job: job}
  1919  }
  1920  
  1921  func (b *interlockBuilder) buildAnalyzeIndexIncremental(task causetembedded.AnalyzeIndexTask, opts map[ast.AnalyzeOptionType]uint64) *analyzeTask {
  1922  	h := petri.GetPetri(b.ctx).StatsHandle()
  1923  	statsTbl := h.GetPartitionStats(&perceptron.BlockInfo{}, task.BlockID.PersistID)
  1924  	analyzeTask := b.buildAnalyzeIndexPushdown(task, opts, "")
  1925  	if statsTbl.Pseudo {
  1926  		return analyzeTask
  1927  	}
  1928  	idx, ok := statsTbl.Indices[task.IndexInfo.ID]
  1929  	if !ok || idx.Len() == 0 || idx.LastAnalyzePos.IsNull() {
  1930  		return analyzeTask
  1931  	}
  1932  	var oldHist *statistics.Histogram
  1933  	if statistics.IsAnalyzed(idx.Flag) {
  1934  		exec := analyzeTask.idxInterDirc
  1935  		if idx.CMSketch != nil {
  1936  			width, depth := idx.CMSketch.GetWidthAndDepth()
  1937  			exec.analyzePB.IdxReq.CmsketchWidth = &width
  1938  			exec.analyzePB.IdxReq.CmsketchDepth = &depth
  1939  		}
  1940  		oldHist = idx.Histogram.Copy()
  1941  	} else {
  1942  		_, bktID := idx.LessEventCountWithBktIdx(idx.LastAnalyzePos)
  1943  		if bktID == 0 {
  1944  			return analyzeTask
  1945  		}
  1946  		oldHist = idx.TruncateHistogram(bktID)
  1947  	}
  1948  	oldHist = oldHist.RemoveUpperBound()
  1949  	analyzeTask.taskType = idxIncrementalTask
  1950  	analyzeTask.idxIncrementalInterDirc = &analyzeIndexIncrementalInterDirc{AnalyzeIndexInterDirc: *analyzeTask.idxInterDirc, oldHist: oldHist, oldCMS: idx.CMSketch}
  1951  	analyzeTask.job = &statistics.AnalyzeJob{DBName: task.DBName, BlockName: task.BlockName, PartitionName: task.PartitionName, JobInfo: "analyze incremental index " + task.IndexInfo.Name.O}
  1952  	return analyzeTask
  1953  }
  1954  
  1955  func (b *interlockBuilder) buildAnalyzeDeferredCausetsPushdown(task causetembedded.AnalyzeDeferredCausetsTask, opts map[ast.AnalyzeOptionType]uint64, autoAnalyze string) *analyzeTask {
  1956  	defcaus := task.DefCaussInfo
  1957  	if hasPkHist(task.HandleDefCauss) {
  1958  		defCausInfo := task.TblInfo.DeferredCausets[task.HandleDefCauss.GetDefCaus(0).Index]
  1959  		defcaus = append([]*perceptron.DeferredCausetInfo{defCausInfo}, defcaus...)
  1960  	} else if task.HandleDefCauss != nil && !task.HandleDefCauss.IsInt() {
  1961  		defcaus = make([]*perceptron.DeferredCausetInfo, 0, len(task.DefCaussInfo)+task.HandleDefCauss.NumDefCauss())
  1962  		for i := 0; i < task.HandleDefCauss.NumDefCauss(); i++ {
  1963  			defcaus = append(defcaus, task.TblInfo.DeferredCausets[task.HandleDefCauss.GetDefCaus(i).Index])
  1964  		}
  1965  		defcaus = append(defcaus, task.DefCaussInfo...)
  1966  		task.DefCaussInfo = defcaus
  1967  	}
  1968  
  1969  	_, offset := timeutil.Zone(b.ctx.GetStochastikVars().Location())
  1970  	sc := b.ctx.GetStochastikVars().StmtCtx
  1971  	e := &AnalyzeDeferredCausetsInterDirc{
  1972  		ctx:            b.ctx,
  1973  		blockID:        task.BlockID,
  1974  		defcausInfo:    task.DefCaussInfo,
  1975  		handleDefCauss: task.HandleDefCauss,
  1976  		concurrency:    b.ctx.GetStochastikVars().DistALLEGROSQLScanConcurrency(),
  1977  		analyzePB: &fidelpb.AnalyzeReq{
  1978  			Tp:             fidelpb.AnalyzeType_TypeDeferredCauset,
  1979  			Flags:          sc.PushDownFlags(),
  1980  			TimeZoneOffset: offset,
  1981  		},
  1982  		opts: opts,
  1983  	}
  1984  	depth := int32(opts[ast.AnalyzeOptCMSketchDepth])
  1985  	width := int32(opts[ast.AnalyzeOptCMSketchWidth])
  1986  	e.analyzePB.DefCausReq = &fidelpb.AnalyzeDeferredCausetsReq{
  1987  		BucketSize:          int64(opts[ast.AnalyzeOptNumBuckets]),
  1988  		SampleSize:          maxRegionSampleSize,
  1989  		SketchSize:          maxSketchSize,
  1990  		DeferredCausetsInfo: soliton.DeferredCausetsToProto(defcaus, task.HandleDefCauss != nil && task.HandleDefCauss.IsInt()),
  1991  		CmsketchDepth:       &depth,
  1992  		CmsketchWidth:       &width,
  1993  	}
  1994  	if task.TblInfo != nil {
  1995  		e.analyzePB.DefCausReq.PrimaryDeferredCausetIds = blocks.TryGetCommonPkDeferredCausetIds(task.TblInfo)
  1996  	}
  1997  	b.err = causetembedded.SetPBDeferredCausetsDefaultValue(b.ctx, e.analyzePB.DefCausReq.DeferredCausetsInfo, defcaus)
  1998  	job := &statistics.AnalyzeJob{DBName: task.DBName, BlockName: task.BlockName, PartitionName: task.PartitionName, JobInfo: autoAnalyze + "analyze defCausumns"}
  1999  	return &analyzeTask{taskType: defCausTask, defCausInterDirc: e, job: job}
  2000  }
  2001  
  2002  func (b *interlockBuilder) buildAnalyzePKIncremental(task causetembedded.AnalyzeDeferredCausetsTask, opts map[ast.AnalyzeOptionType]uint64) *analyzeTask {
  2003  	h := petri.GetPetri(b.ctx).StatsHandle()
  2004  	statsTbl := h.GetPartitionStats(&perceptron.BlockInfo{}, task.BlockID.PersistID)
  2005  	analyzeTask := b.buildAnalyzeDeferredCausetsPushdown(task, opts, "")
  2006  	if statsTbl.Pseudo {
  2007  		return analyzeTask
  2008  	}
  2009  	if task.HandleDefCauss == nil || !task.HandleDefCauss.IsInt() {
  2010  		return analyzeTask
  2011  	}
  2012  	defCaus, ok := statsTbl.DeferredCausets[task.HandleDefCauss.GetDefCaus(0).ID]
  2013  	if !ok || defCaus.Len() == 0 || defCaus.LastAnalyzePos.IsNull() {
  2014  		return analyzeTask
  2015  	}
  2016  	var oldHist *statistics.Histogram
  2017  	if statistics.IsAnalyzed(defCaus.Flag) {
  2018  		oldHist = defCaus.Histogram.Copy()
  2019  	} else {
  2020  		d, err := defCaus.LastAnalyzePos.ConvertTo(b.ctx.GetStochastikVars().StmtCtx, defCaus.Tp)
  2021  		if err != nil {
  2022  			b.err = err
  2023  			return nil
  2024  		}
  2025  		_, bktID := defCaus.LessEventCountWithBktIdx(d)
  2026  		if bktID == 0 {
  2027  			return analyzeTask
  2028  		}
  2029  		oldHist = defCaus.TruncateHistogram(bktID)
  2030  		oldHist.NDV = int64(oldHist.TotalEventCount())
  2031  	}
  2032  	exec := analyzeTask.defCausInterDirc
  2033  	analyzeTask.taskType = pkIncrementalTask
  2034  	analyzeTask.defCausIncrementalInterDirc = &analyzePKIncrementalInterDirc{AnalyzeDeferredCausetsInterDirc: *exec, oldHist: oldHist}
  2035  	analyzeTask.job = &statistics.AnalyzeJob{DBName: task.DBName, BlockName: task.BlockName, PartitionName: task.PartitionName, JobInfo: "analyze incremental primary key"}
  2036  	return analyzeTask
  2037  }
  2038  
  2039  func (b *interlockBuilder) buildAnalyzeFastDeferredCauset(e *AnalyzeInterDirc, task causetembedded.AnalyzeDeferredCausetsTask, opts map[ast.AnalyzeOptionType]uint64) {
  2040  	findTask := false
  2041  	for _, eTask := range e.tasks {
  2042  		if eTask.fastInterDirc != nil && eTask.fastInterDirc.blockID.Equals(&task.BlockID) {
  2043  			eTask.fastInterDirc.defcausInfo = append(eTask.fastInterDirc.defcausInfo, task.DefCaussInfo...)
  2044  			findTask = true
  2045  			break
  2046  		}
  2047  	}
  2048  	if !findTask {
  2049  		var concurrency int
  2050  		concurrency, b.err = getBuildStatsConcurrency(e.ctx)
  2051  		if b.err != nil {
  2052  			return
  2053  		}
  2054  		fastInterDirc := &AnalyzeFastInterDirc{
  2055  			ctx:            b.ctx,
  2056  			blockID:        task.BlockID,
  2057  			defcausInfo:    task.DefCaussInfo,
  2058  			handleDefCauss: task.HandleDefCauss,
  2059  			opts:           opts,
  2060  			tblInfo:        task.TblInfo,
  2061  			concurrency:    concurrency,
  2062  			wg:             &sync.WaitGroup{},
  2063  		}
  2064  		b.err = fastInterDirc.calculateEstimateSampleStep()
  2065  		if b.err != nil {
  2066  			return
  2067  		}
  2068  		e.tasks = append(e.tasks, &analyzeTask{
  2069  			taskType:      fastTask,
  2070  			fastInterDirc: fastInterDirc,
  2071  			job:           &statistics.AnalyzeJob{DBName: task.DBName, BlockName: task.BlockName, PartitionName: task.PartitionName, JobInfo: "fast analyze defCausumns"},
  2072  		})
  2073  	}
  2074  }
  2075  
  2076  func (b *interlockBuilder) buildAnalyzeFastIndex(e *AnalyzeInterDirc, task causetembedded.AnalyzeIndexTask, opts map[ast.AnalyzeOptionType]uint64) {
  2077  	findTask := false
  2078  	for _, eTask := range e.tasks {
  2079  		if eTask.fastInterDirc != nil && eTask.fastInterDirc.blockID.Equals(&task.BlockID) {
  2080  			eTask.fastInterDirc.idxsInfo = append(eTask.fastInterDirc.idxsInfo, task.IndexInfo)
  2081  			findTask = true
  2082  			break
  2083  		}
  2084  	}
  2085  	if !findTask {
  2086  		var concurrency int
  2087  		concurrency, b.err = getBuildStatsConcurrency(e.ctx)
  2088  		if b.err != nil {
  2089  			return
  2090  		}
  2091  		fastInterDirc := &AnalyzeFastInterDirc{
  2092  			ctx:         b.ctx,
  2093  			blockID:     task.BlockID,
  2094  			idxsInfo:    []*perceptron.IndexInfo{task.IndexInfo},
  2095  			opts:        opts,
  2096  			tblInfo:     task.TblInfo,
  2097  			concurrency: concurrency,
  2098  			wg:          &sync.WaitGroup{},
  2099  		}
  2100  		b.err = fastInterDirc.calculateEstimateSampleStep()
  2101  		if b.err != nil {
  2102  			return
  2103  		}
  2104  		e.tasks = append(e.tasks, &analyzeTask{
  2105  			taskType:      fastTask,
  2106  			fastInterDirc: fastInterDirc,
  2107  			job:           &statistics.AnalyzeJob{DBName: task.DBName, BlockName: task.BlockName, PartitionName: task.PartitionName, JobInfo: "fast analyze index " + task.IndexInfo.Name.O},
  2108  		})
  2109  	}
  2110  }
  2111  
  2112  func (b *interlockBuilder) buildAnalyze(v *causetembedded.Analyze) InterlockingDirectorate {
  2113  	e := &AnalyzeInterDirc{
  2114  		baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID()),
  2115  		tasks:                       make([]*analyzeTask, 0, len(v.DefCausTasks)+len(v.IdxTasks)),
  2116  		wg:                          &sync.WaitGroup{},
  2117  	}
  2118  	enableFastAnalyze := b.ctx.GetStochastikVars().EnableFastAnalyze
  2119  	autoAnalyze := ""
  2120  	if b.ctx.GetStochastikVars().InRestrictedALLEGROSQL {
  2121  		autoAnalyze = "auto "
  2122  	}
  2123  	for _, task := range v.DefCausTasks {
  2124  		if task.Incremental {
  2125  			e.tasks = append(e.tasks, b.buildAnalyzePKIncremental(task, v.Opts))
  2126  		} else {
  2127  			if enableFastAnalyze {
  2128  				b.buildAnalyzeFastDeferredCauset(e, task, v.Opts)
  2129  			} else {
  2130  				e.tasks = append(e.tasks, b.buildAnalyzeDeferredCausetsPushdown(task, v.Opts, autoAnalyze))
  2131  			}
  2132  		}
  2133  		if b.err != nil {
  2134  			return nil
  2135  		}
  2136  	}
  2137  	for _, task := range v.IdxTasks {
  2138  		if task.Incremental {
  2139  			e.tasks = append(e.tasks, b.buildAnalyzeIndexIncremental(task, v.Opts))
  2140  		} else {
  2141  			if enableFastAnalyze {
  2142  				b.buildAnalyzeFastIndex(e, task, v.Opts)
  2143  			} else {
  2144  				e.tasks = append(e.tasks, b.buildAnalyzeIndexPushdown(task, v.Opts, autoAnalyze))
  2145  			}
  2146  		}
  2147  		if b.err != nil {
  2148  			return nil
  2149  		}
  2150  	}
  2151  	return e
  2152  }
  2153  
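        // constructDistInterDirc converts the given physical causets into fidelpb interlocks for EinsteinDB
        // and reports whether every causet in the list supports the streaming API.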
  2154  func constructDistInterDirc(sctx stochastikctx.Context, plans []causetembedded.PhysicalCauset) ([]*fidelpb.InterlockingDirectorate, bool, error) {
  2155  	streaming := true
  2156  	interlocks := make([]*fidelpb.InterlockingDirectorate, 0, len(plans))
  2157  	for _, p := range plans {
  2158  		execPB, err := p.ToPB(sctx, ekv.EinsteinDB)
  2159  		if err != nil {
  2160  			return nil, false, err
  2161  		}
  2162  		if !causetembedded.SupportStreaming(p) {
  2163  			streaming = false
  2164  		}
  2165  		interlocks = append(interlocks, execPB)
  2166  	}
  2167  	return interlocks, streaming, nil
  2168  }
  2169  
  2170  // markChildrenUsedDefCauss compares each child with the output schemaReplicant, and marks
  2171  // whether each defCausumn of the child is used by the output or not.
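        // For example, if the output schemaReplicant keeps defCausumns {a, c} of a child whose schemaReplicant
        // is {a, b, c}, the slice returned for that child is {true, false, true}.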
  2172  func markChildrenUsedDefCauss(outputSchema *memex.Schema, childSchema ...*memex.Schema) (childrenUsed [][]bool) {
  2173  	for _, child := range childSchema {
  2174  		used := memex.GetUsedList(outputSchema.DeferredCausets, child)
  2175  		childrenUsed = append(childrenUsed, used)
  2176  	}
  2177  	return
  2178  }
  2179  
  2180  func constructDistInterDircForTiFlash(sctx stochastikctx.Context, p causetembedded.PhysicalCauset) ([]*fidelpb.InterlockingDirectorate, bool, error) {
  2181  	execPB, err := p.ToPB(sctx, ekv.TiFlash)
  2182  	return []*fidelpb.InterlockingDirectorate{execPB}, false, err
  2184  }
  2185  
  2186  func (b *interlockBuilder) constructPosetDagReq(plans []causetembedded.PhysicalCauset, storeType ekv.StoreType) (posetPosetDagReq *fidelpb.PosetDagRequest, streaming bool, err error) {
  2187  	posetPosetDagReq = &fidelpb.PosetDagRequest{}
  2188  	posetPosetDagReq.TimeZoneName, posetPosetDagReq.TimeZoneOffset = timeutil.Zone(b.ctx.GetStochastikVars().Location())
  2189  	sc := b.ctx.GetStochastikVars().StmtCtx
  2190  	if sc.RuntimeStatsDefCausl != nil {
  2191  		defCauslInterDirc := true
  2192  		posetPosetDagReq.DefCauslectInterDircutionSummaries = &defCauslInterDirc
  2193  	}
  2194  	posetPosetDagReq.Flags = sc.PushDownFlags()
  2195  	if storeType == ekv.TiFlash {
  2196  		var interlocks []*fidelpb.InterlockingDirectorate
  2197  		interlocks, streaming, err = constructDistInterDircForTiFlash(b.ctx, plans[0])
  2198  		posetPosetDagReq.RootInterlockingDirectorate = interlocks[0]
  2199  	} else {
  2200  		posetPosetDagReq.InterlockingDirectorates, streaming, err = constructDistInterDirc(b.ctx, plans)
  2201  	}
  2202  
  2203  	allegrosql.SetEncodeType(b.ctx, posetPosetDagReq)
  2204  	return posetPosetDagReq, streaming, err
  2205  }
  2206  
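        // corDefCausInDistCauset checks whether any PhysicalSelection condition among the pushed-down causets
        // references a correlated defCausumn.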
  2207  func (b *interlockBuilder) corDefCausInDistCauset(plans []causetembedded.PhysicalCauset) bool {
  2208  	for _, p := range plans {
  2209  		x, ok := p.(*causetembedded.PhysicalSelection)
  2210  		if !ok {
  2211  			continue
  2212  		}
  2213  		for _, cond := range x.Conditions {
  2214  			if len(memex.ExtractCorDeferredCausets(cond)) > 0 {
  2215  				return true
  2216  			}
  2217  		}
  2218  	}
  2219  	return false
  2220  }
  2221  
  2222  // corDefCausInAccess checks whether there's a correlated defCausumn in the access conditions.
  2223  func (b *interlockBuilder) corDefCausInAccess(p causetembedded.PhysicalCauset) bool {
  2224  	var access []memex.Expression
  2225  	switch x := p.(type) {
  2226  	case *causetembedded.PhysicalBlockScan:
  2227  		access = x.AccessCondition
  2228  	case *causetembedded.PhysicalIndexScan:
  2229  		access = x.AccessCondition
  2230  	}
  2231  	for _, cond := range access {
  2232  		if len(memex.ExtractCorDeferredCausets(cond)) > 0 {
  2233  			return true
  2234  		}
  2235  	}
  2236  	return false
  2237  }
  2238  
  2239  func (b *interlockBuilder) buildIndexLookUpJoin(v *causetembedded.PhysicalIndexJoin) InterlockingDirectorate {
  2240  	outerInterDirc := b.build(v.Children()[1-v.InnerChildIdx])
  2241  	if b.err != nil {
  2242  		return nil
  2243  	}
  2244  	outerTypes := retTypes(outerInterDirc)
  2245  	innerCauset := v.Children()[v.InnerChildIdx]
  2246  	innerTypes := make([]*types.FieldType, innerCauset.Schema().Len())
  2247  	for i, defCaus := range innerCauset.Schema().DeferredCausets {
  2248  		innerTypes[i] = defCaus.RetType
  2249  	}
  2250  
  2251  	var (
  2252  		outerFilter           []memex.Expression
  2253  		leftTypes, rightTypes []*types.FieldType
  2254  	)
  2255  
  2256  	if v.InnerChildIdx == 0 {
  2257  		leftTypes, rightTypes = innerTypes, outerTypes
  2258  		outerFilter = v.RightConditions
  2259  		if len(v.LeftConditions) > 0 {
  2260  			b.err = errors.Annotate(ErrBuildInterlockingDirectorate, "join's inner condition should be empty")
  2261  			return nil
  2262  		}
  2263  	} else {
  2264  		leftTypes, rightTypes = outerTypes, innerTypes
  2265  		outerFilter = v.LeftConditions
  2266  		if len(v.RightConditions) > 0 {
  2267  			b.err = errors.Annotate(ErrBuildInterlockingDirectorate, "join's inner condition should be empty")
  2268  			return nil
  2269  		}
  2270  	}
  2271  	defaultValues := v.DefaultValues
  2272  	if defaultValues == nil {
  2273  		defaultValues = make([]types.Causet, len(innerTypes))
  2274  	}
  2275  	hasPrefixDefCaus := false
  2276  	for _, l := range v.IdxDefCausLens {
  2277  		if l != types.UnspecifiedLength {
  2278  			hasPrefixDefCaus = true
  2279  			break
  2280  		}
  2281  	}
  2282  	e := &IndexLookUpJoin{
  2283  		baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID(), outerInterDirc),
  2284  		outerCtx: outerCtx{
  2285  			rowTypes: outerTypes,
  2286  			filter:   outerFilter,
  2287  		},
  2288  		innerCtx: innerCtx{
  2289  			readerBuilder:    &dataReaderBuilder{Causet: innerCauset, interlockBuilder: b},
  2290  			rowTypes:         innerTypes,
  2291  			defCausLens:      v.IdxDefCausLens,
  2292  			hasPrefixDefCaus: hasPrefixDefCaus,
  2293  		},
  2294  		workerWg:          new(sync.WaitGroup),
  2295  		isOuterJoin:       v.JoinType.IsOuterJoin(),
  2296  		indexRanges:       v.Ranges,
  2297  		keyOff2IdxOff:     v.KeyOff2IdxOff,
  2298  		lastDefCausHelper: v.CompareFilters,
  2299  	}
  2300  	childrenUsedSchema := markChildrenUsedDefCauss(v.Schema(), v.Children()[0].Schema(), v.Children()[1].Schema())
  2301  	e.joiner = newJoiner(b.ctx, v.JoinType, v.InnerChildIdx == 0, defaultValues, v.OtherConditions, leftTypes, rightTypes, childrenUsedSchema)
  2302  	outerKeyDefCauss := make([]int, len(v.OuterJoinKeys))
  2303  	for i := 0; i < len(v.OuterJoinKeys); i++ {
  2304  		outerKeyDefCauss[i] = v.OuterJoinKeys[i].Index
  2305  	}
  2306  	e.outerCtx.keyDefCauss = outerKeyDefCauss
  2307  	innerKeyDefCauss := make([]int, len(v.InnerJoinKeys))
  2308  	for i := 0; i < len(v.InnerJoinKeys); i++ {
  2309  		innerKeyDefCauss[i] = v.InnerJoinKeys[i].Index
  2310  	}
  2311  	e.innerCtx.keyDefCauss = innerKeyDefCauss
  2312  	e.joinResult = newFirstChunk(e)
  2313  	interlockCounterIndexLookUpJoin.Inc()
  2314  	return e
  2315  }
  2316  
  2317  func (b *interlockBuilder) buildIndexLookUpMergeJoin(v *causetembedded.PhysicalIndexMergeJoin) InterlockingDirectorate {
  2318  	outerInterDirc := b.build(v.Children()[1-v.InnerChildIdx])
  2319  	if b.err != nil {
  2320  		return nil
  2321  	}
  2322  	outerTypes := retTypes(outerInterDirc)
  2323  	innerCauset := v.Children()[v.InnerChildIdx]
  2324  	innerTypes := make([]*types.FieldType, innerCauset.Schema().Len())
  2325  	for i, defCaus := range innerCauset.Schema().DeferredCausets {
  2326  		innerTypes[i] = defCaus.RetType
  2327  	}
  2328  	var (
  2329  		outerFilter           []memex.Expression
  2330  		leftTypes, rightTypes []*types.FieldType
  2331  	)
  2332  	if v.InnerChildIdx == 0 {
  2333  		leftTypes, rightTypes = innerTypes, outerTypes
  2334  		outerFilter = v.RightConditions
  2335  		if len(v.LeftConditions) > 0 {
  2336  			b.err = errors.Annotate(ErrBuildInterlockingDirectorate, "join's inner condition should be empty")
  2337  			return nil
  2338  		}
  2339  	} else {
  2340  		leftTypes, rightTypes = outerTypes, innerTypes
  2341  		outerFilter = v.LeftConditions
  2342  		if len(v.RightConditions) > 0 {
  2343  			b.err = errors.Annotate(ErrBuildInterlockingDirectorate, "join's inner condition should be empty")
  2344  			return nil
  2345  		}
  2346  	}
  2347  	defaultValues := v.DefaultValues
  2348  	if defaultValues == nil {
  2349  		defaultValues = make([]types.Causet, len(innerTypes))
  2350  	}
  2351  	outerKeyDefCauss := make([]int, len(v.OuterJoinKeys))
  2352  	for i := 0; i < len(v.OuterJoinKeys); i++ {
  2353  		outerKeyDefCauss[i] = v.OuterJoinKeys[i].Index
  2354  	}
  2355  	innerKeyDefCauss := make([]int, len(v.InnerJoinKeys))
  2356  	for i := 0; i < len(v.InnerJoinKeys); i++ {
  2357  		innerKeyDefCauss[i] = v.InnerJoinKeys[i].Index
  2358  	}
  2359  	interlockCounterIndexLookUpJoin.Inc()
  2360  
  2361  	e := &IndexLookUpMergeJoin{
  2362  		baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID(), outerInterDirc),
  2363  		outerMergeCtx: outerMergeCtx{
  2364  			rowTypes:      outerTypes,
  2365  			filter:        outerFilter,
  2366  			joinKeys:      v.OuterJoinKeys,
  2367  			keyDefCauss:   outerKeyDefCauss,
  2368  			needOuterSort: v.NeedOuterSort,
  2369  			compareFuncs:  v.OuterCompareFuncs,
  2370  		},
  2371  		innerMergeCtx: innerMergeCtx{
  2372  			readerBuilder:           &dataReaderBuilder{Causet: innerCauset, interlockBuilder: b},
  2373  			rowTypes:                innerTypes,
  2374  			joinKeys:                v.InnerJoinKeys,
  2375  			keyDefCauss:             innerKeyDefCauss,
  2376  			compareFuncs:            v.CompareFuncs,
  2377  			defCausLens:             v.IdxDefCausLens,
  2378  			desc:                    v.Desc,
  2379  			keyOff2KeyOffOrderByIdx: v.KeyOff2KeyOffOrderByIdx,
  2380  		},
  2381  		workerWg:          new(sync.WaitGroup),
  2382  		isOuterJoin:       v.JoinType.IsOuterJoin(),
  2383  		indexRanges:       v.Ranges,
  2384  		keyOff2IdxOff:     v.KeyOff2IdxOff,
  2385  		lastDefCausHelper: v.CompareFilters,
  2386  	}
  2387  	childrenUsedSchema := markChildrenUsedDefCauss(v.Schema(), v.Children()[0].Schema(), v.Children()[1].Schema())
  2388  	joiners := make([]joiner, e.ctx.GetStochastikVars().IndexLookupJoinConcurrency())
  2389  	for i := 0; i < len(joiners); i++ {
  2390  		joiners[i] = newJoiner(b.ctx, v.JoinType, v.InnerChildIdx == 0, defaultValues, v.OtherConditions, leftTypes, rightTypes, childrenUsedSchema)
  2391  	}
  2392  	e.joiners = joiners
  2393  	return e
  2394  }
  2395  
  2396  func (b *interlockBuilder) buildIndexNestedLoopHashJoin(v *causetembedded.PhysicalIndexHashJoin) InterlockingDirectorate {
  2397  	e := b.buildIndexLookUpJoin(&(v.PhysicalIndexJoin)).(*IndexLookUpJoin)
  2398  	idxHash := &IndexNestedLoopHashJoin{
  2399  		IndexLookUpJoin: *e,
  2400  		keepOuterOrder:  v.KeepOuterOrder,
  2401  	}
  2402  	concurrency := e.ctx.GetStochastikVars().IndexLookupJoinConcurrency()
  2403  	idxHash.joiners = make([]joiner, concurrency)
  2404  	for i := 0; i < concurrency; i++ {
  2405  		idxHash.joiners[i] = e.joiner.Clone()
  2406  	}
  2407  	return idxHash
  2408  }
  2409  
  2410  // containsLimit tests whether the execs contain a Limit, because we do not know whether `Limit` has consumed all of its source,
  2411  // so the feedback may not be accurate.
  2412  func containsLimit(execs []*fidelpb.InterlockingDirectorate) bool {
  2413  	for _, exec := range execs {
  2414  		if exec.Limit != nil {
  2415  			return true
  2416  		}
  2417  	}
  2418  	return false
  2419  }
  2420  
  2421  // When AllowBatchCop is 1, only queries containing agg / topN / broadcast join use batch cop.
  2422  // When AllowBatchCop is 2, every query uses batch cop.
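        // Batch cop is only considered for TiFlash reads that do not require keepOrder; see the guard at the
        // top of this method.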
  2423  func (e *BlockReaderInterlockingDirectorate) setBatchCop(v *causetembedded.PhysicalBlockReader) {
  2424  	if e.storeType != ekv.TiFlash || e.keepOrder {
  2425  		return
  2426  	}
  2427  	switch e.ctx.GetStochastikVars().AllowBatchCop {
  2428  	case 1:
  2429  		for _, p := range v.BlockCausets {
  2430  			switch p.(type) {
  2431  			case *causetembedded.PhysicalHashAgg, *causetembedded.PhysicalStreamAgg, *causetembedded.PhysicalTopN, *causetembedded.PhysicalBroadCastJoin:
  2432  				e.batchCop = true
  2433  			}
  2434  		}
  2435  	case 2:
  2436  		e.batchCop = true
  2437  	}
  2438  	return
  2439  }
  2440  
  2441  func buildNoRangeBlockReader(b *interlockBuilder, v *causetembedded.PhysicalBlockReader) (*BlockReaderInterlockingDirectorate, error) {
  2442  	blockCausets := v.BlockCausets
  2443  	if v.StoreType == ekv.TiFlash {
  2444  		blockCausets = []causetembedded.PhysicalCauset{v.GetBlockCauset()}
  2445  	}
  2446  	posetPosetDagReq, streaming, err := b.constructPosetDagReq(blockCausets, v.StoreType)
  2447  	if err != nil {
  2448  		return nil, err
  2449  	}
  2450  	ts := v.GetBlockScan()
  2451  	tbl, _ := b.is.BlockByID(ts.Block.ID)
  2452  	isPartition, physicalBlockID := ts.IsPartition()
  2453  	if isPartition {
  2454  		pt := tbl.(causet.PartitionedBlock)
  2455  		tbl = pt.GetPartition(physicalBlockID)
  2456  	}
  2457  	startTS, err := b.getSnapshotTS()
  2458  	if err != nil {
  2459  		return nil, err
  2460  	}
  2461  	e := &BlockReaderInterlockingDirectorate{
  2462  		baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID()),
  2463  		posetPosetDagPB:             posetPosetDagReq,
  2464  		startTS:                     startTS,
  2465  		causet:                      tbl,
  2466  		keepOrder:                   ts.KeepOrder,
  2467  		desc:                        ts.Desc,
  2468  		defCausumns:                 ts.DeferredCausets,
  2469  		streaming:                   streaming,
  2470  		corDefCausInFilter:          b.corDefCausInDistCauset(v.BlockCausets),
  2471  		corDefCausInAccess:          b.corDefCausInAccess(v.BlockCausets[0]),
  2472  		plans:                       v.BlockCausets,
  2473  		blockCauset:                 v.GetBlockCauset(),
  2474  		storeType:                   v.StoreType,
  2475  	}
  2476  	e.setBatchCop(v)
  2477  	e.buildVirtualDeferredCausetInfo()
  2478  	if containsLimit(posetPosetDagReq.InterlockingDirectorates) {
  2479  		e.feedback = statistics.NewQueryFeedback(0, nil, 0, ts.Desc)
  2480  	} else {
  2481  		e.feedback = statistics.NewQueryFeedback(getPhysicalBlockID(tbl), ts.Hist, int64(ts.StatsCount()), ts.Desc)
  2482  	}
  2483  	defCauslect := statistics.DefCauslectFeedback(b.ctx.GetStochastikVars().StmtCtx, e.feedback, len(ts.Ranges))
  2484  	if !defCauslect {
  2485  		e.feedback.Invalidate()
  2486  	}
  2487  	e.posetPosetDagPB.DefCauslectRangeCounts = &defCauslect
  2488  	if v.StoreType == ekv.MilevaDB && b.ctx.GetStochastikVars().User != nil {
  2489  		// User info is used to do privilege check. It is only used in MilevaDB cluster memory causet.
  2490  		e.posetPosetDagPB.User = &fidelpb.UserIdentity{
  2491  			UserName: b.ctx.GetStochastikVars().User.Username,
  2492  			UserHost: b.ctx.GetStochastikVars().User.Hostname,
  2493  		}
  2494  	}
  2495  
  2496  	for i := range v.Schema().DeferredCausets {
  2497  		posetPosetDagReq.OutputOffsets = append(posetPosetDagReq.OutputOffsets, uint32(i))
  2498  	}
  2499  
  2500  	return e, nil
  2501  }
  2502  
  2503  // buildBlockReader builds a causet reader interlock. It first builds a no-range causet reader,
  2504  // and then uFIDelates its ranges from the causet scan plan.
  2505  func (b *interlockBuilder) buildBlockReader(v *causetembedded.PhysicalBlockReader) InterlockingDirectorate {
  2506  	if b.ctx.GetStochastikVars().IsPessimisticReadConsistency() {
  2507  		if err := b.refreshForUFIDelateTSForRC(); err != nil {
  2508  			b.err = err
  2509  			return nil
  2510  		}
  2511  	}
  2512  	ret, err := buildNoRangeBlockReader(b, v)
  2513  	if err != nil {
  2514  		b.err = err
  2515  		return nil
  2516  	}
  2517  
  2518  	ts := v.GetBlockScan()
  2519  	ret.ranges = ts.Ranges
  2520  	sctx := b.ctx.GetStochastikVars().StmtCtx
  2521  	sctx.BlockIDs = append(sctx.BlockIDs, ts.Block.ID)
  2522  
  2523  	if !b.ctx.GetStochastikVars().UseDynamicPartitionPrune() {
  2524  		return ret
  2525  	}
  2526  
  2527  	if pi := ts.Block.GetPartitionInfo(); pi == nil {
  2528  		return ret
  2529  	}
  2530  
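        	// For TiFlash, dynamic partition pruning builds one reader per pruned partition and unions them;
        	// zero matching partitions degenerate to a dual interlock, and a single partition is returned directly.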
  2531  	if v.StoreType == ekv.TiFlash {
  2532  		tmp, _ := b.is.BlockByID(ts.Block.ID)
  2533  		tbl := tmp.(causet.PartitionedBlock)
  2534  		partitions, err := partitionPruning(b.ctx, tbl, v.PartitionInfo.PruningConds, v.PartitionInfo.PartitionNames, v.PartitionInfo.DeferredCausets, v.PartitionInfo.DeferredCausetNames)
  2535  		if err != nil {
  2536  			b.err = err
  2537  			return nil
  2538  		}
  2539  		partsInterlockingDirectorate := make([]InterlockingDirectorate, 0, len(partitions))
  2540  		for _, part := range partitions {
  2541  			exec, err := buildNoRangeBlockReader(b, v)
  2542  			if err != nil {
  2543  				b.err = err
  2544  				return nil
  2545  			}
  2546  			exec.ranges = ts.Ranges
  2547  			nexec, err := nextPartitionForBlockReader{exec: exec}.nextPartition(context.Background(), part)
  2548  			if err != nil {
  2549  				b.err = err
  2550  				return nil
  2551  			}
  2552  			partsInterlockingDirectorate = append(partsInterlockingDirectorate, nexec)
  2553  		}
  2554  		if len(partsInterlockingDirectorate) == 0 {
  2555  			return &BlockDualInterDirc{baseInterlockingDirectorate: *ret.base()}
  2556  		}
  2557  		if len(partsInterlockingDirectorate) == 1 {
  2558  			return partsInterlockingDirectorate[0]
  2559  		}
  2560  		return &UnionInterDirc{
  2561  			baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID(), partsInterlockingDirectorate...),
  2562  			concurrency:                 b.ctx.GetStochastikVars().UnionConcurrency(),
  2563  		}
  2564  	}
  2565  
  2566  	nextPartition := nextPartitionForBlockReader{ret}
  2567  	exec, err := buildPartitionBlock(b, ts.Block, &v.PartitionInfo, ret, nextPartition)
  2568  	if err != nil {
  2569  		b.err = err
  2570  		return nil
  2571  	}
  2572  	return exec
  2573  }
  2574  
  2575  func buildPartitionBlock(b *interlockBuilder, tblInfo *perceptron.BlockInfo, partitionInfo *causetembedded.PartitionInfo, e InterlockingDirectorate, n nextPartition) (InterlockingDirectorate, error) {
  2576  	tmp, _ := b.is.BlockByID(tblInfo.ID)
  2577  	tbl := tmp.(causet.PartitionedBlock)
  2578  	partitions, err := partitionPruning(b.ctx, tbl, partitionInfo.PruningConds, partitionInfo.PartitionNames, partitionInfo.DeferredCausets, partitionInfo.DeferredCausetNames)
  2579  	if err != nil {
  2580  		return nil, err
  2581  	}
  2582  
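        	// Every partition was pruned away, so return a dual interlock that produces no events.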
  2583  	if len(partitions) == 0 {
  2584  		return &BlockDualInterDirc{baseInterlockingDirectorate: *e.base()}, nil
  2585  	}
  2586  	return &PartitionBlockInterlockingDirectorate{
  2587  		baseInterlockingDirectorate: *e.base(),
  2588  		partitions:                  partitions,
  2589  		nextPartition:               n,
  2590  	}, nil
  2591  }
  2592  
  2593  func buildNoRangeIndexReader(b *interlockBuilder, v *causetembedded.PhysicalIndexReader) (*IndexReaderInterlockingDirectorate, error) {
  2594  	posetPosetDagReq, streaming, err := b.constructPosetDagReq(v.IndexCausets, ekv.EinsteinDB)
  2595  	if err != nil {
  2596  		return nil, err
  2597  	}
  2598  	is := v.IndexCausets[0].(*causetembedded.PhysicalIndexScan)
  2599  	tbl, _ := b.is.BlockByID(is.Block.ID)
  2600  	isPartition, physicalBlockID := is.IsPartition()
  2601  	if isPartition {
  2602  		pt := tbl.(causet.PartitionedBlock)
  2603  		tbl = pt.GetPartition(physicalBlockID)
  2604  	} else {
  2605  		physicalBlockID = is.Block.ID
  2606  	}
  2607  	startTS, err := b.getSnapshotTS()
  2608  	if err != nil {
  2609  		return nil, err
  2610  	}
  2611  	e := &IndexReaderInterlockingDirectorate{
  2612  		baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID()),
  2613  		posetPosetDagPB:             posetPosetDagReq,
  2614  		startTS:                     startTS,
  2615  		physicalBlockID:             physicalBlockID,
  2616  		causet:                      tbl,
  2617  		index:                       is.Index,
  2618  		keepOrder:                   is.KeepOrder,
  2619  		desc:                        is.Desc,
  2620  		defCausumns:                 is.DeferredCausets,
  2621  		streaming:                   streaming,
  2622  		corDefCausInFilter:          b.corDefCausInDistCauset(v.IndexCausets),
  2623  		corDefCausInAccess:          b.corDefCausInAccess(v.IndexCausets[0]),
  2624  		idxDefCauss:                 is.IdxDefCauss,
  2625  		defCausLens:                 is.IdxDefCausLens,
  2626  		plans:                       v.IndexCausets,
  2627  		outputDeferredCausets:       v.OutputDeferredCausets,
  2628  	}
  2629  	if containsLimit(posetPosetDagReq.InterlockingDirectorates) {
  2630  		e.feedback = statistics.NewQueryFeedback(0, nil, 0, is.Desc)
  2631  	} else {
  2632  		e.feedback = statistics.NewQueryFeedback(e.physicalBlockID, is.Hist, int64(is.StatsCount()), is.Desc)
  2633  	}
  2634  	defCauslect := statistics.DefCauslectFeedback(b.ctx.GetStochastikVars().StmtCtx, e.feedback, len(is.Ranges))
  2635  	if !defCauslect {
  2636  		e.feedback.Invalidate()
  2637  	}
  2638  	e.posetPosetDagPB.DefCauslectRangeCounts = &defCauslect
  2639  
  2640  	for _, defCaus := range v.OutputDeferredCausets {
  2641  		posetPosetDagReq.OutputOffsets = append(posetPosetDagReq.OutputOffsets, uint32(defCaus.Index))
  2642  	}
  2643  
  2644  	return e, nil
  2645  }
  2646  
  2647  func (b *interlockBuilder) buildIndexReader(v *causetembedded.PhysicalIndexReader) InterlockingDirectorate {
  2648  	if b.ctx.GetStochastikVars().IsPessimisticReadConsistency() {
  2649  		if err := b.refreshForUFIDelateTSForRC(); err != nil {
  2650  			b.err = err
  2651  			return nil
  2652  		}
  2653  	}
  2654  	ret, err := buildNoRangeIndexReader(b, v)
  2655  	if err != nil {
  2656  		b.err = err
  2657  		return nil
  2658  	}
  2659  
  2660  	is := v.IndexCausets[0].(*causetembedded.PhysicalIndexScan)
  2661  	ret.ranges = is.Ranges
  2662  	sctx := b.ctx.GetStochastikVars().StmtCtx
  2663  	sctx.IndexNames = append(sctx.IndexNames, is.Block.Name.O+":"+is.Index.Name.O)
  2664  
  2665  	if !b.ctx.GetStochastikVars().UseDynamicPartitionPrune() {
  2666  		return ret
  2667  	}
  2668  
  2669  	if pi := is.Block.GetPartitionInfo(); pi == nil {
  2670  		return ret
  2671  	}
  2672  
  2673  	nextPartition := nextPartitionForIndexReader{exec: ret}
  2674  	exec, err := buildPartitionBlock(b, is.Block, &v.PartitionInfo, ret, nextPartition)
  2675  	if err != nil {
  2676  		b.err = err
        		return nil
  2677  	}
  2678  	return exec
  2679  }
  2680  
  2681  func buildBlockReq(b *interlockBuilder, schemaLen int, plans []causetembedded.PhysicalCauset) (posetPosetDagReq *fidelpb.PosetDagRequest, streaming bool, val causet.Block, err error) {
  2682  	blockReq, blockStreaming, err := b.constructPosetDagReq(plans, ekv.EinsteinDB)
  2683  	if err != nil {
  2684  		return nil, false, nil, err
  2685  	}
  2686  	for i := 0; i < schemaLen; i++ {
  2687  		blockReq.OutputOffsets = append(blockReq.OutputOffsets, uint32(i))
  2688  	}
  2689  	ts := plans[0].(*causetembedded.PhysicalBlockScan)
  2690  	tbl, _ := b.is.BlockByID(ts.Block.ID)
  2691  	isPartition, physicalBlockID := ts.IsPartition()
  2692  	if isPartition {
  2693  		pt := tbl.(causet.PartitionedBlock)
  2694  		tbl = pt.GetPartition(physicalBlockID)
  2695  	}
  2696  	return blockReq, blockStreaming, tbl, err
  2697  }
  2698  
  2699  func buildIndexReq(b *interlockBuilder, schemaLen, handleLen int, plans []causetembedded.PhysicalCauset) (posetPosetDagReq *fidelpb.PosetDagRequest, streaming bool, err error) {
  2700  	indexReq, indexStreaming, err := b.constructPosetDagReq(plans, ekv.EinsteinDB)
  2701  	if err != nil {
  2702  		return nil, false, err
  2703  	}
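        	// The pushed-down index scan outputs the indexed defCausumns first and the handle defCausumns last,
        	// so only the offsets starting at schemaLen are needed for the lookup; if there are no handle
        	// defCausumns, the single defCausumn at offset schemaLen (the implicit handle) is used.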
  2704  	indexReq.OutputOffsets = []uint32{}
  2705  	for i := 0; i < handleLen; i++ {
  2706  		indexReq.OutputOffsets = append(indexReq.OutputOffsets, uint32(schemaLen+i))
  2707  	}
  2708  	if len(indexReq.OutputOffsets) == 0 {
  2709  		indexReq.OutputOffsets = []uint32{uint32(schemaLen)}
  2710  	}
  2711  	return indexReq, indexStreaming, err
  2712  }
  2713  
  2714  func buildNoRangeIndexLookUpReader(b *interlockBuilder, v *causetembedded.PhysicalIndexLookUpReader) (*IndexLookUpInterlockingDirectorate, error) {
  2715  	is := v.IndexCausets[0].(*causetembedded.PhysicalIndexScan)
  2716  	indexReq, indexStreaming, err := buildIndexReq(b, len(is.Index.DeferredCausets), len(v.CommonHandleDefCauss), v.IndexCausets)
  2717  	if err != nil {
  2718  		return nil, err
  2719  	}
  2720  	blockReq, blockStreaming, tbl, err := buildBlockReq(b, v.Schema().Len(), v.BlockCausets)
  2721  	if err != nil {
  2722  		return nil, err
  2723  	}
  2724  	ts := v.BlockCausets[0].(*causetembedded.PhysicalBlockScan)
  2725  	startTS, err := b.getSnapshotTS()
  2726  	if err != nil {
  2727  		return nil, err
  2728  	}
  2729  	e := &IndexLookUpInterlockingDirectorate{
  2730  		baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID()),
  2731  		posetPosetDagPB:             indexReq,
  2732  		startTS:                     startTS,
  2733  		causet:                      tbl,
  2734  		index:                       is.Index,
  2735  		keepOrder:                   is.KeepOrder,
  2736  		desc:                        is.Desc,
  2737  		blockRequest:                blockReq,
  2738  		defCausumns:                 ts.DeferredCausets,
  2739  		indexStreaming:              indexStreaming,
  2740  		blockStreaming:              blockStreaming,
  2741  		dataReaderBuilder:           &dataReaderBuilder{interlockBuilder: b},
  2742  		corDefCausInIdxSide:         b.corDefCausInDistCauset(v.IndexCausets),
  2743  		corDefCausInTblSide:         b.corDefCausInDistCauset(v.BlockCausets),
  2744  		corDefCausInAccess:          b.corDefCausInAccess(v.IndexCausets[0]),
  2745  		idxDefCauss:                 is.IdxDefCauss,
  2746  		defCausLens:                 is.IdxDefCausLens,
  2747  		idxCausets:                  v.IndexCausets,
  2748  		tblCausets:                  v.BlockCausets,
  2749  		PushedLimit:                 v.PushedLimit,
  2750  	}
  2751  
  2752  	if containsLimit(indexReq.InterlockingDirectorates) {
  2753  		e.feedback = statistics.NewQueryFeedback(0, nil, 0, is.Desc)
  2754  	} else {
  2755  		e.feedback = statistics.NewQueryFeedback(getPhysicalBlockID(tbl), is.Hist, int64(is.StatsCount()), is.Desc)
  2756  	}
  2757  	// Do not defCauslect feedback for the causet request.
  2758  	defCauslectBlock := false
  2759  	e.blockRequest.DefCauslectRangeCounts = &defCauslectBlock
  2760  	defCauslectIndex := statistics.DefCauslectFeedback(b.ctx.GetStochastikVars().StmtCtx, e.feedback, len(is.Ranges))
  2761  	if !defCauslectIndex {
  2762  		e.feedback.Invalidate()
  2763  	}
  2764  	e.posetPosetDagPB.DefCauslectRangeCounts = &defCauslectIndex
  2765  	if v.ExtraHandleDefCaus != nil {
  2766  		e.handleIdx = append(e.handleIdx, v.ExtraHandleDefCaus.Index)
  2767  		e.handleDefCauss = []*memex.DeferredCauset{v.ExtraHandleDefCaus}
  2768  	} else {
  2769  		for _, handleDefCaus := range v.CommonHandleDefCauss {
  2770  			e.handleIdx = append(e.handleIdx, handleDefCaus.Index)
  2771  		}
  2772  		e.handleDefCauss = v.CommonHandleDefCauss
  2773  		e.primaryKeyIndex = blocks.FindPrimaryIndex(tbl.Meta())
  2774  	}
  2775  	return e, nil
  2776  }
  2777  
  2778  func (b *interlockBuilder) buildIndexLookUpReader(v *causetembedded.PhysicalIndexLookUpReader) InterlockingDirectorate {
  2779  	if b.ctx.GetStochastikVars().IsPessimisticReadConsistency() {
  2780  		if err := b.refreshForUFIDelateTSForRC(); err != nil {
  2781  			b.err = err
  2782  			return nil
  2783  		}
  2784  	}
  2785  	ret, err := buildNoRangeIndexLookUpReader(b, v)
  2786  	if err != nil {
  2787  		b.err = err
  2788  		return nil
  2789  	}
  2790  
  2791  	is := v.IndexCausets[0].(*causetembedded.PhysicalIndexScan)
  2792  	ts := v.BlockCausets[0].(*causetembedded.PhysicalBlockScan)
  2793  
  2794  	ret.ranges = is.Ranges
  2795  	interlockCounterIndexLookUpInterlockingDirectorate.Inc()
  2796  
  2797  	sctx := b.ctx.GetStochastikVars().StmtCtx
  2798  	sctx.IndexNames = append(sctx.IndexNames, is.Block.Name.O+":"+is.Index.Name.O)
  2799  	sctx.BlockIDs = append(sctx.BlockIDs, ts.Block.ID)
  2800  
  2801  	if !b.ctx.GetStochastikVars().UseDynamicPartitionPrune() {
  2802  		return ret
  2803  	}
  2804  
  2805  	if pi := is.Block.GetPartitionInfo(); pi == nil {
  2806  		return ret
  2807  	}
  2808  
  2809  	nextPartition := nextPartitionForIndexLookUp{exec: ret}
  2810  	exec, err := buildPartitionBlock(b, ts.Block, &v.PartitionInfo, ret, nextPartition)
  2811  	if err != nil {
  2812  		b.err = err
  2813  		return nil
  2814  	}
  2815  	return exec
  2816  }
  2817  
  2818  func buildNoRangeIndexMergeReader(b *interlockBuilder, v *causetembedded.PhysicalIndexMergeReader) (*IndexMergeReaderInterlockingDirectorate, error) {
  2819  	partialCausetCount := len(v.PartialCausets)
  2820  	partialReqs := make([]*fidelpb.PosetDagRequest, 0, partialCausetCount)
  2821  	partialStreamings := make([]bool, 0, partialCausetCount)
  2822  	indexes := make([]*perceptron.IndexInfo, 0, partialCausetCount)
  2823  	keepOrders := make([]bool, 0, partialCausetCount)
  2824  	descs := make([]bool, 0, partialCausetCount)
  2825  	feedbacks := make([]*statistics.QueryFeedback, 0, partialCausetCount)
  2826  	ts := v.BlockCausets[0].(*causetembedded.PhysicalBlockScan)
  2827  	for i := 0; i < partialCausetCount; i++ {
  2828  		var tempReq *fidelpb.PosetDagRequest
  2829  		var tempStreaming bool
  2830  		var err error
  2831  
  2832  		feedback := statistics.NewQueryFeedback(0, nil, 0, ts.Desc)
  2833  		feedback.Invalidate()
  2834  		feedbacks = append(feedbacks, feedback)
  2835  
  2836  		if is, ok := v.PartialCausets[i][0].(*causetembedded.PhysicalIndexScan); ok {
  2837  			tempReq, tempStreaming, err = buildIndexReq(b, len(is.Index.DeferredCausets), ts.HandleDefCauss.NumDefCauss(), v.PartialCausets[i])
  2838  			keepOrders = append(keepOrders, is.KeepOrder)
  2839  			descs = append(descs, is.Desc)
  2840  			indexes = append(indexes, is.Index)
  2841  		} else {
  2842  			ts := v.PartialCausets[i][0].(*causetembedded.PhysicalBlockScan)
  2843  			tempReq, tempStreaming, _, err = buildBlockReq(b, len(ts.DeferredCausets), v.PartialCausets[i])
  2844  			keepOrders = append(keepOrders, ts.KeepOrder)
  2845  			descs = append(descs, ts.Desc)
  2846  			indexes = append(indexes, nil)
  2847  		}
  2848  		if err != nil {
  2849  			return nil, err
  2850  		}
  2851  		defCauslect := false
  2852  		tempReq.DefCauslectRangeCounts = &defCauslect
  2853  		partialReqs = append(partialReqs, tempReq)
  2854  		partialStreamings = append(partialStreamings, tempStreaming)
  2855  	}
  2856  	blockReq, blockStreaming, tblInfo, err := buildBlockReq(b, v.Schema().Len(), v.BlockCausets)
  2857  	if err != nil {
  2858  		return nil, err
  2859  	}
  2860  	startTS, err := b.getSnapshotTS()
  2861  	if err != nil {
  2862  		return nil, err
  2863  	}
  2864  	e := &IndexMergeReaderInterlockingDirectorate{
  2865  		baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID()),
  2866  		posetPosetDagPBs:            partialReqs,
  2867  		startTS:                     startTS,
  2868  		causet:                      tblInfo,
  2869  		indexes:                     indexes,
  2870  		descs:                       descs,
  2871  		blockRequest:                blockReq,
  2872  		defCausumns:                 ts.DeferredCausets,
  2873  		partialStreamings:           partialStreamings,
  2874  		blockStreaming:              blockStreaming,
  2875  		partialCausets:              v.PartialCausets,
  2876  		tblCausets:                  v.BlockCausets,
  2877  		dataReaderBuilder:           &dataReaderBuilder{interlockBuilder: b},
  2878  		feedbacks:                   feedbacks,
  2879  		handleDefCauss:              ts.HandleDefCauss,
  2880  	}
  2881  	defCauslectBlock := false
  2882  	e.blockRequest.DefCauslectRangeCounts = &defCauslectBlock
  2883  	return e, nil
  2884  }
  2885  
  2886  func (b *interlockBuilder) buildIndexMergeReader(v *causetembedded.PhysicalIndexMergeReader) InterlockingDirectorate {
  2887  	ret, err := buildNoRangeIndexMergeReader(b, v)
  2888  	if err != nil {
  2889  		b.err = err
  2890  		return nil
  2891  	}
  2892  	ret.ranges = make([][]*ranger.Range, 0, len(v.PartialCausets))
  2893  	sctx := b.ctx.GetStochastikVars().StmtCtx
  2894  	for i := 0; i < len(v.PartialCausets); i++ {
  2895  		if is, ok := v.PartialCausets[i][0].(*causetembedded.PhysicalIndexScan); ok {
  2896  			ret.ranges = append(ret.ranges, is.Ranges)
  2897  			sctx.IndexNames = append(sctx.IndexNames, is.Block.Name.O+":"+is.Index.Name.O)
  2898  		} else {
  2899  			ret.ranges = append(ret.ranges, v.PartialCausets[i][0].(*causetembedded.PhysicalBlockScan).Ranges)
  2900  			if ret.causet.Meta().IsCommonHandle {
  2901  				tblInfo := ret.causet.Meta()
  2902  				sctx.IndexNames = append(sctx.IndexNames, tblInfo.Name.O+":"+blocks.FindPrimaryIndex(tblInfo).Name.O)
  2903  			}
  2904  		}
  2905  	}
  2906  	ts := v.BlockCausets[0].(*causetembedded.PhysicalBlockScan)
  2907  	sctx.BlockIDs = append(sctx.BlockIDs, ts.Block.ID)
  2908  	interlockCounterIndexMergeReaderInterlockingDirectorate.Inc()
  2909  
  2910  	if !b.ctx.GetStochastikVars().UseDynamicPartitionPrune() {
  2911  		return ret
  2912  	}
  2913  
  2914  	if pi := ts.Block.GetPartitionInfo(); pi == nil {
  2915  		return ret
  2916  	}
  2917  
  2918  	nextPartition := nextPartitionForIndexMerge{ret}
  2919  	exec, err := buildPartitionBlock(b, ts.Block, &v.PartitionInfo, ret, nextPartition)
  2920  	if err != nil {
  2921  		b.err = err
  2922  		return nil
  2923  	}
  2924  	return exec
  2925  }
  2926  
  2927  // dataReaderBuilder builds an interlock.
  2928  // The interlock can be used to read data in the ranges which are constructed by datums.
  2929  // Differences from interlockBuilder:
  2930  // 1. dataReaderBuilder calculates the data ranges from its arguments rather than from the plan.
  2931  // 2. the returned interlock is already opened.
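        // It is mainly used by index joins to build the inner-side reader for each batch of outer events.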
  2932  type dataReaderBuilder struct {
  2933  	causetembedded.Causet
  2934  	*interlockBuilder
  2935  
  2936  	selectResultHook // for testing
  2937  }
  2938  
  2939  type mockPhysicalIndexReader struct {
  2940  	causetembedded.PhysicalCauset
  2941  
  2942  	e InterlockingDirectorate
  2943  }
  2944  
  2945  func (builder *dataReaderBuilder) buildInterlockingDirectorateForIndexJoin(ctx context.Context, lookUpContents []*indexJoinLookUpContent,
  2946  	IndexRanges []*ranger.Range, keyOff2IdxOff []int, cwc *causetembedded.DefCausWithCmpFuncManager) (InterlockingDirectorate, error) {
  2947  	return builder.buildInterlockingDirectorateForIndexJoinInternal(ctx, builder.Causet, lookUpContents, IndexRanges, keyOff2IdxOff, cwc)
  2948  }
  2949  
  2950  func (builder *dataReaderBuilder) buildInterlockingDirectorateForIndexJoinInternal(ctx context.Context, plan causetembedded.Causet, lookUpContents []*indexJoinLookUpContent,
  2951  	IndexRanges []*ranger.Range, keyOff2IdxOff []int, cwc *causetembedded.DefCausWithCmpFuncManager) (InterlockingDirectorate, error) {
  2952  	switch v := plan.(type) {
  2953  	case *causetembedded.PhysicalBlockReader:
  2954  		return builder.buildBlockReaderForIndexJoin(ctx, v, lookUpContents, IndexRanges, keyOff2IdxOff, cwc)
  2955  	case *causetembedded.PhysicalIndexReader:
  2956  		return builder.buildIndexReaderForIndexJoin(ctx, v, lookUpContents, IndexRanges, keyOff2IdxOff, cwc)
  2957  	case *causetembedded.PhysicalIndexLookUpReader:
  2958  		return builder.buildIndexLookUpReaderForIndexJoin(ctx, v, lookUpContents, IndexRanges, keyOff2IdxOff, cwc)
  2959  	case *causetembedded.PhysicalUnionScan:
  2960  		return builder.buildUnionScanForIndexJoin(ctx, v, lookUpContents, IndexRanges, keyOff2IdxOff, cwc)
  2961  	// The inner child of IndexJoin might be a Projection when all of the following conditions are true:
  2962  	// 	1. The inner child fetches data using an IndexLookUpReader
  2963  	// 	2. The PK is not the handle
  2964  	// 	3. The inner child needs to keep order
  2965  	// In this case, an extra defCausumn milevadb_rowid is appended to the output of the IndexLookUpReader (see copTask.doubleReadNeedProj),
  2966  	// and a Projection on top of the IndexLookUpReader is needed to prune the redundant defCausumn.
  2967  	case *causetembedded.PhysicalProjection:
  2968  		return builder.buildProjectionForIndexJoin(ctx, v, lookUpContents, IndexRanges, keyOff2IdxOff, cwc)
  2969  	// Physical selection must be supported here because, after PR 16389, MilevaDB pushes down every expr supported by EinsteinDB or TiFlash
  2970  	// in the predicate push-down stage, so if an expr is only supported by TiFlash, a physical selection is added after the index read.
  2971  	case *causetembedded.PhysicalSelection:
  2972  		childInterDirc, err := builder.buildInterlockingDirectorateForIndexJoinInternal(ctx, v.Children()[0], lookUpContents, IndexRanges, keyOff2IdxOff, cwc)
  2973  		if err != nil {
  2974  			return nil, err
  2975  		}
  2976  		exec := &SelectionInterDirc{
  2977  			baseInterlockingDirectorate: newBaseInterlockingDirectorate(builder.ctx, v.Schema(), v.ID(), childInterDirc),
  2978  			filters:                     v.Conditions,
  2979  		}
  2980  		err = exec.open(ctx)
  2981  		return exec, err
  2982  	case *mockPhysicalIndexReader:
  2983  		return v.e, nil
  2984  	}
  2985  	return nil, errors.New("Wrong plan type for dataReaderBuilder")
  2986  }
  2987  
  2988  func (builder *dataReaderBuilder) buildUnionScanForIndexJoin(ctx context.Context, v *causetembedded.PhysicalUnionScan,
  2989  	values []*indexJoinLookUpContent, indexRanges []*ranger.Range, keyOff2IdxOff []int, cwc *causetembedded.DefCausWithCmpFuncManager) (InterlockingDirectorate, error) {
  2990  	childBuilder := &dataReaderBuilder{Causet: v.Children()[0], interlockBuilder: builder.interlockBuilder}
  2991  	reader, err := childBuilder.buildInterlockingDirectorateForIndexJoin(ctx, values, indexRanges, keyOff2IdxOff, cwc)
  2992  	if err != nil {
  2993  		return nil, err
  2994  	}
  2995  
  2996  	ret := builder.buildUnionScanFromReader(reader, v)
  2997  	if us, ok := ret.(*UnionScanInterDirc); ok {
  2998  		err = us.open(ctx)
  2999  	}
  3000  	return ret, err
  3001  }
  3002  
  3003  func (builder *dataReaderBuilder) buildBlockReaderForIndexJoin(ctx context.Context, v *causetembedded.PhysicalBlockReader,
  3004  	lookUpContents []*indexJoinLookUpContent, indexRanges []*ranger.Range, keyOff2IdxOff []int, cwc *causetembedded.DefCausWithCmpFuncManager) (InterlockingDirectorate, error) {
  3005  	e, err := buildNoRangeBlockReader(builder.interlockBuilder, v)
  3006  	if err != nil {
  3007  		return nil, err
  3008  	}
  3009  	tbInfo := e.causet.Meta()
  3010  	if v.IsCommonHandle {
  3011  		ekvRanges, err := buildEkvRangesForIndexJoin(e.ctx, getPhysicalBlockID(e.causet), -1, lookUpContents, indexRanges, keyOff2IdxOff, cwc)
  3012  		if err != nil {
  3013  			return nil, err
  3014  		}
  3015  		if tbInfo.GetPartitionInfo() == nil {
  3016  			return builder.buildBlockReaderFromEkvRanges(ctx, e, ekvRanges)
  3017  		}
  3018  		e.ekvRangeBuilder = ekvRangeBuilderFromFunc(func(pid int64) ([]ekv.KeyRange, error) {
  3019  			return buildEkvRangesForIndexJoin(e.ctx, pid, -1, lookUpContents, indexRanges, keyOff2IdxOff, cwc)
  3020  		})
  3021  		nextPartition := nextPartitionForBlockReader{e}
  3022  		return buildPartitionBlock(builder.interlockBuilder, tbInfo, &v.PartitionInfo, e, nextPartition)
  3023  	}
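        	// For int handles, a lookup content matches a single event only if every join key decodes to
        	// the same handle value; contents with conflicting keys cannot match anything and are skipped.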
  3024  	handles := make([]ekv.Handle, 0, len(lookUpContents))
  3025  	for _, content := range lookUpContents {
  3026  		isValidHandle := true
  3027  		handle := ekv.IntHandle(content.keys[0].GetInt64())
  3028  		for _, key := range content.keys {
  3029  			if handle.IntValue() != key.GetInt64() {
  3030  				isValidHandle = false
  3031  				break
  3032  			}
  3033  		}
  3034  		if isValidHandle {
  3035  			handles = append(handles, handle)
  3036  		}
  3037  	}
  3038  
  3039  	if tbInfo.GetPartitionInfo() == nil {
  3040  		return builder.buildBlockReaderFromHandles(ctx, e, handles)
  3041  	}
  3042  	if !builder.ctx.GetStochastikVars().UseDynamicPartitionPrune() {
  3043  		return builder.buildBlockReaderFromHandles(ctx, e, handles)
  3044  	}
  3045  
  3046  	e.ekvRangeBuilder = ekvRangeBuilderFromHandles(handles)
  3047  	nextPartition := nextPartitionForBlockReader{e}
  3048  	return buildPartitionBlock(builder.interlockBuilder, tbInfo, &v.PartitionInfo, e, nextPartition)
  3049  }
  3050  
  3051  type ekvRangeBuilderFromFunc func(pid int64) ([]ekv.KeyRange, error)
  3052  
  3053  func (h ekvRangeBuilderFromFunc) buildKeyRange(pid int64) ([]ekv.KeyRange, error) {
  3054  	return h(pid)
  3055  }
  3056  
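        // ekvRangeBuilderFromHandles builds a partition's key ranges from a fixed set of handles,
        // sorting the handles before converting them.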
  3057  type ekvRangeBuilderFromHandles []ekv.Handle
  3058  
  3059  func (h ekvRangeBuilderFromHandles) buildKeyRange(pid int64) ([]ekv.KeyRange, error) {
  3060  	handles := []ekv.Handle(h)
  3061  	sort.Slice(handles, func(i, j int) bool {
  3062  		return handles[i].Compare(handles[j]) < 0
  3063  	})
  3064  	return allegrosql.BlockHandlesToKVRanges(pid, handles), nil
  3065  }
  3066  
  3067  func (builder *dataReaderBuilder) buildBlockReaderBase(ctx context.Context, e *BlockReaderInterlockingDirectorate, reqBuilderWithRange allegrosql.RequestBuilder) (*BlockReaderInterlockingDirectorate, error) {
  3068  	startTS, err := builder.getSnapshotTS()
  3069  	if err != nil {
  3070  		return nil, err
  3071  	}
  3072  	ekvReq, err := reqBuilderWithRange.
  3073  		SetPosetDagRequest(e.posetPosetDagPB).
  3074  		SetStartTS(startTS).
  3075  		SetDesc(e.desc).
  3076  		SetKeepOrder(e.keepOrder).
  3077  		SetStreaming(e.streaming).
  3078  		SetFromStochastikVars(e.ctx.GetStochastikVars()).
  3079  		Build()
  3080  	if err != nil {
  3081  		return nil, err
  3082  	}
  3083  	e.ekvRanges = append(e.ekvRanges, ekvReq.KeyRanges...)
  3084  	e.resultHandler = &blockResultHandler{}
  3085  	result, err := builder.SelectResult(ctx, builder.ctx, ekvReq, retTypes(e), e.feedback, getPhysicalCausetIDs(e.plans), e.id)
  3086  	if err != nil {
  3087  		return nil, err
  3088  	}
  3089  	result.Fetch(ctx)
  3090  	e.resultHandler.open(nil, result)
  3091  	return e, nil
  3092  }
  3093  
  3094  func (builder *dataReaderBuilder) buildBlockReaderFromHandles(ctx context.Context, e *BlockReaderInterlockingDirectorate, handles []ekv.Handle) (*BlockReaderInterlockingDirectorate, error) {
  3095  	sort.Slice(handles, func(i, j int) bool {
  3096  		return handles[i].Compare(handles[j]) < 0
  3097  	})
  3098  	var b allegrosql.RequestBuilder
  3099  	b.SetBlockHandles(getPhysicalBlockID(e.causet), handles)
  3100  	return builder.buildBlockReaderBase(ctx, e, b)
  3101  }
  3102  
  3103  func (builder *dataReaderBuilder) buildBlockReaderFromEkvRanges(ctx context.Context, e *BlockReaderInterlockingDirectorate, ranges []ekv.KeyRange) (InterlockingDirectorate, error) {
  3104  	var b allegrosql.RequestBuilder
  3105  	b.SetKeyRanges(ranges)
  3106  	return builder.buildBlockReaderBase(ctx, e, b)
  3107  }
  3108  
  3109  func (builder *dataReaderBuilder) buildIndexReaderForIndexJoin(ctx context.Context, v *causetembedded.PhysicalIndexReader,
  3110  	lookUpContents []*indexJoinLookUpContent, indexRanges []*ranger.Range, keyOff2IdxOff []int, cwc *causetembedded.DefCausWithCmpFuncManager) (InterlockingDirectorate, error) {
  3111  	e, err := buildNoRangeIndexReader(builder.interlockBuilder, v)
  3112  	if err != nil {
  3113  		return nil, err
  3114  	}
  3115  	tbInfo := e.causet.Meta()
  3116  	if tbInfo.GetPartitionInfo() == nil || !builder.ctx.GetStochastikVars().UseDynamicPartitionPrune() {
  3117  		ekvRanges, err := buildEkvRangesForIndexJoin(e.ctx, e.physicalBlockID, e.index.ID, lookUpContents, indexRanges, keyOff2IdxOff, cwc)
  3118  		if err != nil {
  3119  			return nil, err
  3120  		}
  3121  		err = e.open(ctx, ekvRanges)
  3122  		return e, err
  3123  	}
  3124  
  3125  	e.ranges, err = buildRangesForIndexJoin(e.ctx, lookUpContents, indexRanges, keyOff2IdxOff, cwc)
  3126  	if err != nil {
  3127  		return nil, err
  3128  	}
  3129  	nextPartition := nextPartitionForIndexReader{exec: e}
  3130  	ret, err := buildPartitionBlock(builder.interlockBuilder, tbInfo, &v.PartitionInfo, e, nextPartition)
  3131  	if err != nil {
  3132  		return nil, err
  3133  	}
  3134  	err = ret.Open(ctx)
  3135  	return ret, err
  3136  }
  3137  
  3138  func (builder *dataReaderBuilder) buildIndexLookUpReaderForIndexJoin(ctx context.Context, v *causetembedded.PhysicalIndexLookUpReader,
  3139  	lookUpContents []*indexJoinLookUpContent, indexRanges []*ranger.Range, keyOff2IdxOff []int, cwc *causetembedded.DefCausWithCmpFuncManager) (InterlockingDirectorate, error) {
  3140  	e, err := buildNoRangeIndexLookUpReader(builder.interlockBuilder, v)
  3141  	if err != nil {
  3142  		return nil, err
  3143  	}
  3144  
  3145  	tbInfo := e.causet.Meta()
  3146  	if tbInfo.GetPartitionInfo() == nil || !builder.ctx.GetStochastikVars().UseDynamicPartitionPrune() {
  3147  		e.ekvRanges, err = buildEkvRangesForIndexJoin(e.ctx, getPhysicalBlockID(e.causet), e.index.ID, lookUpContents, indexRanges, keyOff2IdxOff, cwc)
  3148  		if err != nil {
  3149  			return nil, err
  3150  		}
  3151  		err = e.open(ctx)
  3152  		return e, err
  3153  	}
  3154  
  3155  	e.ranges, err = buildRangesForIndexJoin(e.ctx, lookUpContents, indexRanges, keyOff2IdxOff, cwc)
  3156  	if err != nil {
  3157  		return nil, err
  3158  	}
  3159  	nextPartition := nextPartitionForIndexLookUp{exec: e}
  3160  	ret, err := buildPartitionBlock(builder.interlockBuilder, tbInfo, &v.PartitionInfo, e, nextPartition)
  3161  	if err != nil {
  3162  		return nil, err
  3163  	}
  3164  	err = ret.Open(ctx)
  3165  	return ret, err
  3166  }
  3167  
  3168  func (builder *dataReaderBuilder) buildProjectionForIndexJoin(ctx context.Context, v *causetembedded.PhysicalProjection,
  3169  	lookUpContents []*indexJoinLookUpContent, indexRanges []*ranger.Range, keyOff2IdxOff []int, cwc *causetembedded.DefCausWithCmpFuncManager) (InterlockingDirectorate, error) {
  3170  	physicalIndexLookUp, isDoubleRead := v.Children()[0].(*causetembedded.PhysicalIndexLookUpReader)
  3171  	if !isDoubleRead {
  3172  		return nil, errors.Errorf("inner child of Projection should be IndexLookupReader, but got %T", v.Children()[0])
  3173  	}
  3174  	childInterDirc, err := builder.buildIndexLookUpReaderForIndexJoin(ctx, physicalIndexLookUp, lookUpContents, indexRanges, keyOff2IdxOff, cwc)
  3175  	if err != nil {
  3176  		return nil, err
  3177  	}
  3178  
  3179  	e := &ProjectionInterDirc{
  3180  		baseInterlockingDirectorate: newBaseInterlockingDirectorate(builder.ctx, v.Schema(), v.ID(), childInterDirc),
  3181  		numWorkers:                  int64(builder.ctx.GetStochastikVars().ProjectionConcurrency()),
  3182  		evaluatorSuit:               memex.NewEvaluatorSuite(v.Exprs, v.AvoidDeferredCausetEvaluator),
  3183  		calculateNoDelay:            v.CalculateNoDelay,
  3184  	}
  3185  
  3186  	// If the estimated event count for this Projection operator is smaller
  3187  	// than a Chunk size, fall back to the unparallel Projection
  3188  	// implementation to reduce the goroutine overhead.
  3189  	if int64(v.StatsCount()) < int64(builder.ctx.GetStochastikVars().MaxChunkSize) {
  3190  		e.numWorkers = 0
  3191  	}
  3192  	err = e.open(ctx)
  3193  
  3194  	return e, err
  3195  }
  3196  
  3197  // buildRangesForIndexJoin builds index ranges for an index join when the inner plan is an index scan plan.
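        // keyOff2IdxOff maps each join-key offset in a lookup content to a defCausumn offset in the
        // index ranges; e.g. with keyOff2IdxOff = []int{1, 0}, content.keys[0] overwrites range
        // defCausumn 1 and content.keys[1] overwrites range defCausumn 0. When cwc is non-nil the last
        // range defCausumn is further refined per event and the results are unioned at the end.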
  3198  func buildRangesForIndexJoin(ctx stochastikctx.Context, lookUpContents []*indexJoinLookUpContent,
  3199  	ranges []*ranger.Range, keyOff2IdxOff []int, cwc *causetembedded.DefCausWithCmpFuncManager) ([]*ranger.Range, error) {
  3200  	retRanges := make([]*ranger.Range, 0, len(ranges)*len(lookUpContents))
  3201  	lastPos := len(ranges[0].LowVal) - 1
  3202  	tmFIDelatumRanges := make([]*ranger.Range, 0, len(lookUpContents))
  3203  	for _, content := range lookUpContents {
  3204  		for _, ran := range ranges {
  3205  			for keyOff, idxOff := range keyOff2IdxOff {
  3206  				ran.LowVal[idxOff] = content.keys[keyOff]
  3207  				ran.HighVal[idxOff] = content.keys[keyOff]
  3208  			}
  3209  		}
  3210  		if cwc == nil {
  3211  			// A deep copy is needed here because the old []*ranger.Range is overwritten.
  3212  			for _, ran := range ranges {
  3213  				retRanges = append(retRanges, ran.Clone())
  3214  			}
  3215  			continue
  3216  		}
  3217  		nextDefCausRanges, err := cwc.BuildRangesByEvent(ctx, content.event)
  3218  		if err != nil {
  3219  			return nil, err
  3220  		}
  3221  		for _, nextDefCausRan := range nextDefCausRanges {
  3222  			for _, ran := range ranges {
  3223  				ran.LowVal[lastPos] = nextDefCausRan.LowVal[0]
  3224  				ran.HighVal[lastPos] = nextDefCausRan.HighVal[0]
  3225  				ran.LowExclude = nextDefCausRan.LowExclude
  3226  				ran.HighExclude = nextDefCausRan.HighExclude
  3227  				tmFIDelatumRanges = append(tmFIDelatumRanges, ran.Clone())
  3228  			}
  3229  		}
  3230  	}
  3231  
  3232  	if cwc == nil {
  3233  		return retRanges, nil
  3234  	}
  3235  
  3236  	return ranger.UnionRanges(ctx.GetStochastikVars().StmtCtx, tmFIDelatumRanges, true)
  3237  }
  3238  
  3239  // buildEkvRangesForIndexJoin builds ekv ranges for an index join when the inner plan is an index scan plan.
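        // The construction mirrors buildRangesForIndexJoin, but the ranges are encoded into ekv key ranges:
        // common-handle ranges when indexID == -1 and index ranges otherwise; in the cwc == nil case the
        // resulting key ranges are sorted by start key.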
  3240  func buildEkvRangesForIndexJoin(ctx stochastikctx.Context, blockID, indexID int64, lookUpContents []*indexJoinLookUpContent,
  3241  	ranges []*ranger.Range, keyOff2IdxOff []int, cwc *causetembedded.DefCausWithCmpFuncManager) (_ []ekv.KeyRange, err error) {
  3242  	ekvRanges := make([]ekv.KeyRange, 0, len(ranges)*len(lookUpContents))
  3243  	lastPos := len(ranges[0].LowVal) - 1
  3244  	sc := ctx.GetStochastikVars().StmtCtx
  3245  	tmFIDelatumRanges := make([]*ranger.Range, 0, len(lookUpContents))
  3246  	for _, content := range lookUpContents {
  3247  		for _, ran := range ranges {
  3248  			for keyOff, idxOff := range keyOff2IdxOff {
  3249  				ran.LowVal[idxOff] = content.keys[keyOff]
  3250  				ran.HighVal[idxOff] = content.keys[keyOff]
  3251  			}
  3252  		}
  3253  		if cwc == nil {
  3254  			// An index id of -1 means it's a common handle.
  3255  			var tmpEkvRanges []ekv.KeyRange
  3256  			var err error
  3257  			if indexID == -1 {
  3258  				tmpEkvRanges, err = allegrosql.CommonHandleRangesToKVRanges(sc, blockID, ranges)
  3259  			} else {
  3260  				tmpEkvRanges, err = allegrosql.IndexRangesToKVRanges(sc, blockID, indexID, ranges, nil)
  3261  			}
  3262  			if err != nil {
  3263  				return nil, err
  3264  			}
  3265  			ekvRanges = append(ekvRanges, tmpEkvRanges...)
  3266  			continue
  3267  		}
  3268  		nextDefCausRanges, err := cwc.BuildRangesByEvent(ctx, content.event)
  3269  		if err != nil {
  3270  			return nil, err
  3271  		}
  3272  		for _, nextDefCausRan := range nextDefCausRanges {
  3273  			for _, ran := range ranges {
  3274  				ran.LowVal[lastPos] = nextDefCausRan.LowVal[0]
  3275  				ran.HighVal[lastPos] = nextDefCausRan.HighVal[0]
  3276  				ran.LowExclude = nextDefCausRan.LowExclude
  3277  				ran.HighExclude = nextDefCausRan.HighExclude
  3278  				tmFIDelatumRanges = append(tmFIDelatumRanges, ran.Clone())
  3279  			}
  3280  		}
  3281  	}
  3282  
  3283  	if cwc == nil {
  3284  		sort.Slice(ekvRanges, func(i, j int) bool {
  3285  			return bytes.Compare(ekvRanges[i].StartKey, ekvRanges[j].StartKey) < 0
  3286  		})
  3287  		return ekvRanges, nil
  3288  	}
  3289  
  3290  	tmFIDelatumRanges, err = ranger.UnionRanges(ctx.GetStochastikVars().StmtCtx, tmFIDelatumRanges, true)
  3291  	if err != nil {
  3292  		return nil, err
  3293  	}
  3294  	// An index id of -1 means it's a common handle.
  3295  	if indexID == -1 {
  3296  		return allegrosql.CommonHandleRangesToKVRanges(ctx.GetStochastikVars().StmtCtx, blockID, tmFIDelatumRanges)
  3297  	}
  3298  	return allegrosql.IndexRangesToKVRanges(ctx.GetStochastikVars().StmtCtx, blockID, indexID, tmFIDelatumRanges, nil)
  3299  }
  3300  
  3301  func (b *interlockBuilder) buildWindow(v *causetembedded.PhysicalWindow) *WindowInterDirc {
  3302  	childInterDirc := b.build(v.Children()[0])
  3303  	if b.err != nil {
  3304  		return nil
  3305  	}
  3306  	base := newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID(), childInterDirc)
  3307  	groupByItems := make([]memex.Expression, 0, len(v.PartitionBy))
  3308  	for _, item := range v.PartitionBy {
  3309  		groupByItems = append(groupByItems, item.DefCaus)
  3310  	}
  3311  	orderByDefCauss := make([]*memex.DeferredCauset, 0, len(v.OrderBy))
  3312  	for _, item := range v.OrderBy {
  3313  		orderByDefCauss = append(orderByDefCauss, item.DefCaus)
  3314  	}
  3315  	windowFuncs := make([]aggfuncs.AggFunc, 0, len(v.WindowFuncDescs))
  3316  	partialResults := make([]aggfuncs.PartialResult, 0, len(v.WindowFuncDescs))
  3317  	resultDefCausIdx := v.Schema().Len() - len(v.WindowFuncDescs)
  3318  	for _, desc := range v.WindowFuncDescs {
  3319  		aggDesc, err := aggregation.NewAggFuncDesc(b.ctx, desc.Name, desc.Args, false)
  3320  		if err != nil {
  3321  			b.err = err
  3322  			return nil
  3323  		}
  3324  		agg := aggfuncs.BuildWindowFunctions(b.ctx, aggDesc, resultDefCausIdx, orderByDefCauss)
  3325  		windowFuncs = append(windowFuncs, agg)
  3326  		partialResult, _ := agg.AllocPartialResult()
  3327  		partialResults = append(partialResults, partialResult)
  3328  		resultDefCausIdx++
  3329  	}
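        	// Choose the frame processor: no frame means whole-partition aggregation, an Events frame
        	// uses event offsets, and any other frame is treated as a range frame over the order-by defCausumns.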
  3330  	var processor windowProcessor
  3331  	if v.Frame == nil {
  3332  		processor = &aggWindowProcessor{
  3333  			windowFuncs:    windowFuncs,
  3334  			partialResults: partialResults,
  3335  		}
  3336  	} else if v.Frame.Type == ast.Events {
  3337  		processor = &rowFrameWindowProcessor{
  3338  			windowFuncs:    windowFuncs,
  3339  			partialResults: partialResults,
  3340  			start:          v.Frame.Start,
  3341  			end:            v.Frame.End,
  3342  		}
  3343  	} else {
  3344  		cmpResult := int64(-1)
  3345  		if len(v.OrderBy) > 0 && v.OrderBy[0].Desc {
  3346  			cmpResult = 1
  3347  		}
  3348  		processor = &rangeFrameWindowProcessor{
  3349  			windowFuncs:       windowFuncs,
  3350  			partialResults:    partialResults,
  3351  			start:             v.Frame.Start,
  3352  			end:               v.Frame.End,
  3353  			orderByDefCauss:   orderByDefCauss,
  3354  			expectedCmpResult: cmpResult,
  3355  		}
  3356  	}
  3357  	return &WindowInterDirc{baseInterlockingDirectorate: base,
  3358  		processor:      processor,
  3359  		groupChecker:   newVecGroupChecker(b.ctx, groupByItems),
  3360  		numWindowFuncs: len(v.WindowFuncDescs),
  3361  	}
  3362  }
  3363  
  3364  func (b *interlockBuilder) buildShuffle(v *causetembedded.PhysicalShuffle) *ShuffleInterDirc {
  3365  	base := newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID())
  3366  	shuffle := &ShuffleInterDirc{baseInterlockingDirectorate: base,
  3367  		concurrency: v.Concurrency,
  3368  	}
  3369  
  3370  	switch v.SplitterType {
  3371  	case causetembedded.PartitionHashSplitterType:
  3372  		shuffle.splitter = &partitionHashSplitter{
  3373  			byItems:    v.HashByItems,
  3374  			numWorkers: shuffle.concurrency,
  3375  		}
  3376  	default:
  3377  		panic("Not implemented. Should not reach here.")
  3378  	}
  3379  
  3380  	shuffle.dataSource = b.build(v.DataSource)
  3381  	if b.err != nil {
  3382  		return nil
  3383  	}
  3384  
  3385  	// head & tail of the physical plan chain within a "partition".
  3386  	var head, tail = v.Children()[0], v.Tail
  3387  
  3388  	shuffle.workers = make([]*shuffleWorker, shuffle.concurrency)
  3389  	for i := range shuffle.workers {
  3390  		w := &shuffleWorker{
  3391  			baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.DataSource.Schema(), v.DataSource.ID()),
  3392  		}
  3393  
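        		// Point the partition's plan chain at a stub wrapping this worker, then rebuild the
        		// chain so each worker owns an independent interlock tree.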
  3394  		stub := causetembedded.PhysicalShuffleDataSourceStub{
  3395  			Worker: (unsafe.Pointer)(w),
  3396  		}.Init(b.ctx, v.DataSource.Stats(), v.DataSource.SelectBlockOffset(), nil)
  3397  		stub.SetSchema(v.DataSource.Schema())
  3398  
  3399  		tail.SetChildren(stub)
  3400  		w.childInterDirc = b.build(head)
  3401  		if b.err != nil {
  3402  			return nil
  3403  		}
  3404  
  3405  		shuffle.workers[i] = w
  3406  	}
  3407  
  3408  	return shuffle
  3409  }
  3410  
  3411  func (b *interlockBuilder) buildShuffleDataSourceStub(v *causetembedded.PhysicalShuffleDataSourceStub) *shuffleWorker {
  3412  	return (*shuffleWorker)(v.Worker)
  3413  }
  3414  
  3415  func (b *interlockBuilder) buildALLEGROSQLBindInterDirc(v *causetembedded.ALLEGROSQLBindCauset) InterlockingDirectorate {
  3416  	base := newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID())
  3417  	base.initCap = chunk.ZeroCapacity
  3418  
  3419  	e := &ALLEGROSQLBindInterDirc{
  3420  		baseInterlockingDirectorate: base,
  3421  		sqlBindOp:                   v.ALLEGROSQLBindOp,
  3422  		normdOrigALLEGROSQL:         v.NormdOrigALLEGROSQL,
  3423  		bindALLEGROSQL:              v.BindALLEGROSQL,
  3424  		charset:                     v.Charset,
  3425  		defCauslation:               v.DefCauslation,
  3426  		EDB:                         v.EDB,
  3427  		isGlobal:                    v.IsGlobal,
  3428  		bindAst:                     v.BindStmt,
  3429  	}
  3430  	return e
  3431  }
  3432  
  3433  // NewEventCausetDecoder creates a chunk causetDecoder for decoding event values in the new event format.
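        // Missing defCausumns are filled with their origin default values via the defVal callback below.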
  3434  func NewEventCausetDecoder(ctx stochastikctx.Context, schemaReplicant *memex.Schema, tbl *perceptron.BlockInfo) *rowcodec.ChunkCausetDecoder {
  3435  	getDefCausInfoByID := func(tbl *perceptron.BlockInfo, defCausID int64) *perceptron.DeferredCausetInfo {
  3436  		for _, defCaus := range tbl.DeferredCausets {
  3437  			if defCaus.ID == defCausID {
  3438  				return defCaus
  3439  			}
  3440  		}
  3441  		return nil
  3442  	}
  3443  	var pkDefCauss []int64
  3444  	reqDefCauss := make([]rowcodec.DefCausInfo, len(schemaReplicant.DeferredCausets))
  3445  	for i := range schemaReplicant.DeferredCausets {
  3446  		idx, defCaus := i, schemaReplicant.DeferredCausets[i]
  3447  		isPK := (tbl.PKIsHandle && allegrosql.HasPriKeyFlag(defCaus.RetType.Flag)) || defCaus.ID == perceptron.ExtraHandleID
  3448  		if isPK {
  3449  			pkDefCauss = append(pkDefCauss, defCaus.ID)
  3450  		}
  3451  		isGeneratedDefCaus := false
  3452  		if defCaus.VirtualExpr != nil {
  3453  			isGeneratedDefCaus = true
  3454  		}
  3455  		reqDefCauss[idx] = rowcodec.DefCausInfo{
  3456  			ID:                defCaus.ID,
  3457  			VirtualGenDefCaus: isGeneratedDefCaus,
  3458  			Ft:                defCaus.RetType,
  3459  		}
  3460  	}
  3461  	if len(pkDefCauss) == 0 {
  3462  		pkDefCauss = blocks.TryGetCommonPkDeferredCausetIds(tbl)
  3463  		if len(pkDefCauss) == 0 {
  3464  			pkDefCauss = []int64{0}
  3465  		}
  3466  	}
  3467  	defVal := func(i int, chk *chunk.Chunk) error {
  3468  		ci := getDefCausInfoByID(tbl, reqDefCauss[i].ID)
  3469  		d, err := causet.GetDefCausOriginDefaultValue(ctx, ci)
  3470  		if err != nil {
  3471  			return err
  3472  		}
  3473  		chk.AppendCauset(i, &d)
  3474  		return nil
  3475  	}
  3476  	return rowcodec.NewChunkCausetDecoder(reqDefCauss, pkDefCauss, defVal, ctx.GetStochastikVars().TimeZone)
  3477  }
  3478  
  3479  func (b *interlockBuilder) buildBatchPointGet(plan *causetembedded.BatchPointGetCauset) InterlockingDirectorate {
  3480  	if b.ctx.GetStochastikVars().IsPessimisticReadConsistency() {
  3481  		if err := b.refreshForUFIDelateTSForRC(); err != nil {
  3482  			b.err = err
  3483  			return nil
  3484  		}
  3485  	}
  3486  	startTS, err := b.getSnapshotTS()
  3487  	if err != nil {
  3488  		b.err = err
  3489  		return nil
  3490  	}
  3491  	causetDecoder := NewEventCausetDecoder(b.ctx, plan.Schema(), plan.TblInfo)
  3492  	e := &BatchPointGetInterDirc{
  3493  		baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, plan.Schema(), plan.ID()),
  3494  		tblInfo:                     plan.TblInfo,
  3495  		idxInfo:                     plan.IndexInfo,
  3496  		rowCausetDecoder:            causetDecoder,
  3497  		startTS:                     startTS,
  3498  		keepOrder:                   plan.KeepOrder,
  3499  		desc:                        plan.Desc,
  3500  		dagger:                      plan.Lock,
  3501  		waitTime:                    plan.LockWaitTime,
  3502  		partPos:                     plan.PartitionDefCausPos,
  3503  		defCausumns:                 plan.DeferredCausets,
  3504  	}
  3505  	if e.dagger {
  3506  		b.hasLock = true
  3507  	}
  3508  	var capacity int
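        	// A lookup through a non-clustered index uses the index values directly; otherwise the
        	// handles (int or common) are deduplicated so repeated lookup values produce a single event.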
  3509  	if plan.IndexInfo != nil && !isCommonHandleRead(plan.TblInfo, plan.IndexInfo) {
  3510  		e.idxVals = plan.IndexValues
  3511  		capacity = len(e.idxVals)
  3512  	} else {
  3513  		// `SELECT a FROM t WHERE a IN (1, 1, 2, 1, 2)` should not return duplicated rows
  3514  		handles := make([]ekv.Handle, 0, len(plan.Handles))
  3515  		dedup := ekv.NewHandleMap()
  3516  		if plan.IndexInfo == nil {
  3517  			for _, handle := range plan.Handles {
  3518  				if _, found := dedup.Get(handle); found {
  3519  					continue
  3520  				}
  3521  				dedup.Set(handle, true)
  3522  				handles = append(handles, handle)
  3523  			}
  3524  		} else {
  3525  			for _, value := range plan.IndexValues {
  3526  				handleBytes, err := EncodeUniqueIndexValuesForKey(e.ctx, e.tblInfo, plan.IndexInfo, value)
  3527  				if err != nil {
  3528  					b.err = err
  3529  					return nil
  3530  				}
  3531  				handle, err := ekv.NewCommonHandle(handleBytes)
  3532  				if err != nil {
  3533  					b.err = err
  3534  					return nil
  3535  				}
  3536  				if _, found := dedup.Get(handle); found {
  3537  					continue
  3538  				}
  3539  				dedup.Set(handle, true)
  3540  				handles = append(handles, handle)
  3541  			}
  3542  		}
  3543  		e.handles = handles
  3544  		capacity = len(e.handles)
  3545  	}
  3546  	e.base().initCap = capacity
  3547  	e.base().maxChunkSize = capacity
  3548  	e.buildVirtualDeferredCausetInfo()
  3549  	return e
  3550  }
  3551  
  3552  func isCommonHandleRead(tbl *perceptron.BlockInfo, idx *perceptron.IndexInfo) bool {
  3553  	return tbl.IsCommonHandle && idx.Primary
  3554  }
  3555  
  3556  func getPhysicalBlockID(t causet.Block) int64 {
  3557  	if p, ok := t.(causet.PhysicalBlock); ok {
  3558  		return p.GetPhysicalID()
  3559  	}
  3560  	return t.Meta().ID
  3561  }
  3562  
  3563  func (b *interlockBuilder) buildAdminShowTelemetry(v *causetembedded.AdminShowTelemetry) InterlockingDirectorate {
  3564  	return &AdminShowTelemetryInterDirc{baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID())}
  3565  }
  3566  
  3567  func (b *interlockBuilder) buildAdminResetTelemetryID(v *causetembedded.AdminResetTelemetryID) InterlockingDirectorate {
  3568  	return &AdminResetTelemetryIDInterDirc{baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, v.Schema(), v.ID())}
  3569  }
  3570  
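        // partitionPruning returns the physical partitions of tbl that may match conds; a FullRange
        // result from the causet layer means every partition is kept.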
  3571  func partitionPruning(ctx stochastikctx.Context, tbl causet.PartitionedBlock, conds []memex.Expression, partitionNames []perceptron.CIStr,
  3572  	defCausumns []*memex.DeferredCauset, defCausumnNames types.NameSlice) ([]causet.PhysicalBlock, error) {
  3573  	idxArr, err := causetembedded.PartitionPruning(ctx, tbl, conds, partitionNames, defCausumns, defCausumnNames)
  3574  	if err != nil {
  3575  		return nil, err
  3576  	}
  3577  
  3578  	pi := tbl.Meta().GetPartitionInfo()
  3579  	var ret []causet.PhysicalBlock
  3580  	if fullRangePartition(idxArr) {
  3581  		ret = make([]causet.PhysicalBlock, 0, len(pi.Definitions))
  3582  		for _, def := range pi.Definitions {
  3583  			p := tbl.GetPartition(def.ID)
  3584  			ret = append(ret, p)
  3585  		}
  3586  	} else {
  3587  		ret = make([]causet.PhysicalBlock, 0, len(idxArr))
  3588  		for _, idx := range idxArr {
  3589  			pid := pi.Definitions[idx].ID
  3590  			p := tbl.GetPartition(pid)
  3591  			ret = append(ret, p)
  3592  		}
  3593  	}
  3594  	return ret, nil
  3595  }
  3596  
  3597  func fullRangePartition(idxArr []int) bool {
  3598  	return len(idxArr) == 1 && idxArr[0] == causetembedded.FullRange
  3599  }