github.com/whtcorpsinc/MilevaDB-Prod@v0.0.0-20211104133533-f57f4be3b597/interlock/interlocking_directorate.go

     1  // Copyright 2020 WHTCORPS INC, Inc.
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
    12  // limitations under the License.
    13  
    14  package interlock
    15  
    16  import (
    17  	"context"
    18  	"fmt"
    19  	"math"
    20  	"runtime"
    21  	"runtime/trace"
    22  	"strconv"
    23  	"strings"
    24  	"sync"
    25  	"sync/atomic"
    26  	"time"
    27  
    28  	"github.com/cznic/mathutil"
    29  	"github.com/opentracing/opentracing-go"
    30  	"github.com/whtcorpsinc/BerolinaSQL/allegrosql"
    31  	"github.com/whtcorpsinc/BerolinaSQL/ast"
    32  	"github.com/whtcorpsinc/BerolinaSQL/auth"
    33  	"github.com/whtcorpsinc/BerolinaSQL/perceptron"
    34  	"github.com/whtcorpsinc/BerolinaSQL/terror"
    35  	"github.com/whtcorpsinc/errors"
    36  	"github.com/whtcorpsinc/milevadb/blockcodec"
    37  	"github.com/whtcorpsinc/milevadb/causet"
    38  	"github.com/whtcorpsinc/milevadb/causet/blocks"
    39  	causetembedded "github.com/whtcorpsinc/milevadb/causet/embedded"
    40  	"github.com/whtcorpsinc/milevadb/config"
    41  	"github.com/whtcorpsinc/milevadb/ekv"
    42  	"github.com/whtcorpsinc/milevadb/memex"
    43  	"github.com/whtcorpsinc/milevadb/petri"
    44  	"github.com/whtcorpsinc/milevadb/petri/infosync"
    45  	"github.com/whtcorpsinc/milevadb/privilege"
    46  	"github.com/whtcorpsinc/milevadb/schemareplicant"
    47  	"github.com/whtcorpsinc/milevadb/soliton"
    48  	"github.com/whtcorpsinc/milevadb/soliton/admin"
    49  	"github.com/whtcorpsinc/milevadb/soliton/chunk"
    50  	"github.com/whtcorpsinc/milevadb/soliton/disk"
    51  	"github.com/whtcorpsinc/milevadb/soliton/execdetails"
    52  	"github.com/whtcorpsinc/milevadb/soliton/logutil"
    53  	"github.com/whtcorpsinc/milevadb/soliton/memory"
    54  	"github.com/whtcorpsinc/milevadb/spacetime"
    55  	"github.com/whtcorpsinc/milevadb/spacetime/autoid"
    56  	"github.com/whtcorpsinc/milevadb/stochastikctx"
    57  	"github.com/whtcorpsinc/milevadb/stochastikctx/stmtctx"
    58  	"github.com/whtcorpsinc/milevadb/stochastikctx/variable"
    59  	"github.com/whtcorpsinc/milevadb/types"
    60  	"go.uber.org/zap"
    61  )
    62  
    63  var (
    64  	_ InterlockingDirectorate = &baseInterlockingDirectorate{}
    65  	_ InterlockingDirectorate = &CheckBlockInterDirc{}
    66  	_ InterlockingDirectorate = &HashAggInterDirc{}
    67  	_ InterlockingDirectorate = &HashJoinInterDirc{}
    68  	_ InterlockingDirectorate = &IndexLookUpInterlockingDirectorate{}
    69  	_ InterlockingDirectorate = &IndexReaderInterlockingDirectorate{}
    70  	_ InterlockingDirectorate = &LimitInterDirc{}
    71  	_ InterlockingDirectorate = &MaxOneEventInterDirc{}
    72  	_ InterlockingDirectorate = &MergeJoinInterDirc{}
    73  	_ InterlockingDirectorate = &ProjectionInterDirc{}
    74  	_ InterlockingDirectorate = &SelectionInterDirc{}
    75  	_ InterlockingDirectorate = &SelectLockInterDirc{}
    76  	_ InterlockingDirectorate = &ShowNextEventIDInterDirc{}
    77  	_ InterlockingDirectorate = &ShowDBSInterDirc{}
    78  	_ InterlockingDirectorate = &ShowDBSJobsInterDirc{}
    79  	_ InterlockingDirectorate = &ShowDBSJobQueriesInterDirc{}
    80  	_ InterlockingDirectorate = &SortInterDirc{}
    81  	_ InterlockingDirectorate = &StreamAggInterDirc{}
    82  	_ InterlockingDirectorate = &BlockDualInterDirc{}
    83  	_ InterlockingDirectorate = &BlockReaderInterlockingDirectorate{}
    84  	_ InterlockingDirectorate = &BlockScanInterDirc{}
    85  	_ InterlockingDirectorate = &TopNInterDirc{}
    86  	_ InterlockingDirectorate = &UnionInterDirc{}
    87  
	// GlobalMemoryUsageTracker is the ancestor of all the InterlockingDirectorates' memory trackers and of the global memory tracker.
	GlobalMemoryUsageTracker *memory.Tracker
	// GlobalDiskUsageTracker is the ancestor of all the InterlockingDirectorates' disk trackers.
	GlobalDiskUsageTracker *disk.Tracker
    92  )
    93  
    94  type baseInterlockingDirectorate struct {
    95  	ctx             stochastikctx.Context
    96  	id              int
    97  	schemaReplicant *memex.Schema // output schemaReplicant
    98  	initCap         int
    99  	maxChunkSize    int
   100  	children        []InterlockingDirectorate
   101  	retFieldTypes   []*types.FieldType
   102  	runtimeStats    *execdetails.BasicRuntimeStats
   103  }
   104  
   105  const (
   106  	// globalPanicStorageExceed represents the panic message when out of storage quota.
   107  	globalPanicStorageExceed string = "Out Of Global CausetStorage Quota!"
   108  	// globalPanicMemoryExceed represents the panic message when out of memory limit.
   109  	globalPanicMemoryExceed string = "Out Of Global Memory Limit!"
   110  )
   111  
// globalPanicOnExceed panics when global memory or storage usage exceeds its quota.
   113  type globalPanicOnExceed struct {
   114  	mutex sync.Mutex // For synchronization.
   115  }
   116  
   117  func init() {
   118  	action := &globalPanicOnExceed{}
   119  	GlobalMemoryUsageTracker = memory.NewGlobalTracker(memory.LabelForGlobalMemory, -1)
   120  	GlobalMemoryUsageTracker.SetSuperCowOrNoCausetOnExceed(action)
   121  	GlobalDiskUsageTracker = disk.NewGlobalTrcaker(memory.LabelForGlobalStorage, -1)
   122  	GlobalDiskUsageTracker.SetSuperCowOrNoCausetOnExceed(action)
   123  }
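
// Trackers created per memex attach to these global trackers; a condensed
// view of what ResetContextOfStmt below does:
//
//	sc.MemTracker.AttachToGlobalTracker(GlobalMemoryUsageTracker)
//	sc.DiskTracker.AttachToGlobalTracker(GlobalDiskUsageTracker)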
   124  
// SetLogHook sets a hook for globalPanicOnExceed.
   126  func (a *globalPanicOnExceed) SetLogHook(hook func(uint64)) {}
   127  
// CausetAction panics when memory or storage usage exceeds the corresponding quota.
   129  func (a *globalPanicOnExceed) CausetAction(t *memory.Tracker) {
   130  	a.mutex.Lock()
   131  	defer a.mutex.Unlock()
   132  	msg := ""
   133  	switch t.Label() {
   134  	case memory.LabelForGlobalStorage:
   135  		msg = globalPanicStorageExceed
   136  	case memory.LabelForGlobalMemory:
   137  		msg = globalPanicMemoryExceed
   138  	default:
   139  		msg = "Out of Unknown Resource Quota!"
   140  	}
   141  	panic(msg)
   142  }
   143  
   144  // SetFallback sets a fallback action.
   145  func (a *globalPanicOnExceed) SetFallback(memory.SuperCowOrNoCausetOnExceed) {}
   146  
// base returns the baseInterlockingDirectorate of an interlock; don't override this method!
   148  func (e *baseInterlockingDirectorate) base() *baseInterlockingDirectorate {
   149  	return e
   150  }
   151  
// Open initializes all the children recursively.
   153  func (e *baseInterlockingDirectorate) Open(ctx context.Context) error {
   154  	for _, child := range e.children {
   155  		err := child.Open(ctx)
   156  		if err != nil {
   157  			return err
   158  		}
   159  	}
   160  	return nil
   161  }
   162  
// Close closes all interlocks and releases all resources.
   164  func (e *baseInterlockingDirectorate) Close() error {
   165  	var firstErr error
   166  	for _, src := range e.children {
   167  		if err := src.Close(); err != nil && firstErr == nil {
   168  			firstErr = err
   169  		}
   170  	}
   171  	return firstErr
   172  }
   173  
// Schema returns the current baseInterlockingDirectorate's schemaReplicant. If it is nil, an empty schemaReplicant is created and returned.
   175  func (e *baseInterlockingDirectorate) Schema() *memex.Schema {
   176  	if e.schemaReplicant == nil {
   177  		return memex.NewSchema()
   178  	}
   179  	return e.schemaReplicant
   180  }
   181  
   182  // newFirstChunk creates a new chunk to buffer current interlock's result.
   183  func newFirstChunk(e InterlockingDirectorate) *chunk.Chunk {
   184  	base := e.base()
   185  	return chunk.New(base.retFieldTypes, base.initCap, base.maxChunkSize)
   186  }
   187  
   188  // newList creates a new List to buffer current interlock's result.
   189  func newList(e InterlockingDirectorate) *chunk.List {
   190  	base := e.base()
   191  	return chunk.NewList(base.retFieldTypes, base.initCap, base.maxChunkSize)
   192  }
   193  
   194  // retTypes returns all output defCausumn types.
   195  func retTypes(e InterlockingDirectorate) []*types.FieldType {
   196  	base := e.base()
   197  	return base.retFieldTypes
   198  }
   199  
   200  // Next fills multiple rows into a chunk.
   201  func (e *baseInterlockingDirectorate) Next(ctx context.Context, req *chunk.Chunk) error {
   202  	return nil
   203  }
   204  
   205  func newBaseInterlockingDirectorate(ctx stochastikctx.Context, schemaReplicant *memex.Schema, id int, children ...InterlockingDirectorate) baseInterlockingDirectorate {
   206  	e := baseInterlockingDirectorate{
   207  		children:        children,
   208  		ctx:             ctx,
   209  		id:              id,
   210  		schemaReplicant: schemaReplicant,
   211  		initCap:         ctx.GetStochastikVars().InitChunkSize,
   212  		maxChunkSize:    ctx.GetStochastikVars().MaxChunkSize,
   213  	}
   214  	if ctx.GetStochastikVars().StmtCtx.RuntimeStatsDefCausl != nil {
   215  		if e.id > 0 {
   216  			e.runtimeStats = &execdetails.BasicRuntimeStats{}
   217  			e.ctx.GetStochastikVars().StmtCtx.RuntimeStatsDefCausl.RegisterStats(id, e.runtimeStats)
   218  		}
   219  	}
   220  	if schemaReplicant != nil {
   221  		defcaus := schemaReplicant.DeferredCausets
   222  		e.retFieldTypes = make([]*types.FieldType, len(defcaus))
   223  		for i := range defcaus {
   224  			e.retFieldTypes[i] = defcaus[i].RetType
   225  		}
   226  	}
   227  	return e
   228  }
   229  
// InterlockingDirectorate is the physical implementation of an algebra operator.
//
// In MilevaDB, all algebra operators are implemented as iterators, i.e., they
// support a simple Open-Next-Close protocol. See this paper for more details:
//
// "Volcano-An Extensible and Parallel Query Evaluation System"
//
// Unlike Volcano's execution model, a "Next" call in MilevaDB returns a batch
// of rows rather than a single event.
// NOTE: InterlockingDirectorates must call "chk.Reset()" before appending their results to it.
   240  type InterlockingDirectorate interface {
   241  	base() *baseInterlockingDirectorate
   242  	Open(context.Context) error
   243  	Next(ctx context.Context, req *chunk.Chunk) error
   244  	Close() error
   245  	Schema() *memex.Schema
   246  }
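
// A minimal sketch (illustrative only, assuming "exec" is an
// InterlockingDirectorate already produced by an interlockBuilder) of how a
// caller typically drives the Open-Next-Close protocol; it mirrors the loop
// in EvalSubqueryFirstEvent further below:
//
//	if err := exec.Open(ctx); err != nil {
//		return err
//	}
//	defer terror.Call(exec.Close)
//	chk := newFirstChunk(exec)
//	for {
//		if err := Next(ctx, exec, chk); err != nil {
//			return err
//		}
//		if chk.NumEvents() == 0 {
//			break // the iterator is drained
//		}
//		// consume the events in chk before the next call resets it
//	}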
   247  
// Next is a wrapper function around e.Next(); it handles some common logic.
   249  func Next(ctx context.Context, e InterlockingDirectorate, req *chunk.Chunk) error {
   250  	base := e.base()
   251  	if base.runtimeStats != nil {
   252  		start := time.Now()
   253  		defer func() { base.runtimeStats.Record(time.Since(start), req.NumEvents()) }()
   254  	}
   255  	sessVars := base.ctx.GetStochastikVars()
   256  	if atomic.LoadUint32(&sessVars.Killed) == 1 {
   257  		return ErrQueryInterrupted
   258  	}
   259  	if span := opentracing.SpanFromContext(ctx); span != nil && span.Tracer() != nil {
   260  		span1 := span.Tracer().StartSpan(fmt.Sprintf("%T.Next", e), opentracing.ChildOf(span.Context()))
   261  		defer span1.Finish()
   262  		ctx = opentracing.ContextWithSpan(ctx, span1)
   263  	}
   264  	if trace.IsEnabled() {
   265  		defer trace.StartRegion(ctx, fmt.Sprintf("%T.Next", e)).End()
   266  	}
   267  	err := e.Next(ctx, req)
   268  
   269  	if err != nil {
   270  		return err
   271  	}
	// Recheck whether the stochastik/query was killed during the Next() call.
   273  	if atomic.LoadUint32(&sessVars.Killed) == 1 {
   274  		err = ErrQueryInterrupted
   275  	}
   276  	return err
   277  }
   278  
   279  // CancelDBSJobsInterDirc represents a cancel DBS jobs interlock.
   280  type CancelDBSJobsInterDirc struct {
   281  	baseInterlockingDirectorate
   282  
   283  	cursor int
   284  	jobIDs []int64
   285  	errs   []error
   286  }
   287  
   288  // Next implements the InterlockingDirectorate Next interface.
   289  func (e *CancelDBSJobsInterDirc) Next(ctx context.Context, req *chunk.Chunk) error {
   290  	req.GrowAndReset(e.maxChunkSize)
   291  	if e.cursor >= len(e.jobIDs) {
   292  		return nil
   293  	}
   294  	numCurBatch := mathutil.Min(req.Capacity(), len(e.jobIDs)-e.cursor)
   295  	for i := e.cursor; i < e.cursor+numCurBatch; i++ {
   296  		req.AppendString(0, fmt.Sprintf("%d", e.jobIDs[i]))
   297  		if e.errs[i] != nil {
   298  			req.AppendString(1, fmt.Sprintf("error: %v", e.errs[i]))
   299  		} else {
   300  			req.AppendString(1, "successful")
   301  		}
   302  	}
   303  	e.cursor += numCurBatch
   304  	return nil
   305  }
   306  
// ShowNextEventIDInterDirc represents an interlock that shows the next event ID.
   308  type ShowNextEventIDInterDirc struct {
   309  	baseInterlockingDirectorate
   310  	tblName *ast.BlockName
   311  	done    bool
   312  }
   313  
   314  // Next implements the InterlockingDirectorate Next interface.
   315  func (e *ShowNextEventIDInterDirc) Next(ctx context.Context, req *chunk.Chunk) error {
   316  	req.Reset()
   317  	if e.done {
   318  		return nil
   319  	}
   320  	is := petri.GetPetri(e.ctx).SchemaReplicant()
   321  	tbl, err := is.BlockByName(e.tblName.Schema, e.tblName.Name)
   322  	if err != nil {
   323  		return err
   324  	}
   325  	tblMeta := tbl.Meta()
   326  
   327  	allocators := tbl.SlabPredictors(e.ctx)
   328  	for _, alloc := range allocators {
   329  		nextGlobalID, err := alloc.NextGlobalAutoID(tblMeta.ID)
   330  		if err != nil {
   331  			return err
   332  		}
   333  
   334  		var defCausName, idType string
   335  		switch alloc.GetType() {
   336  		case autoid.EventIDAllocType, autoid.AutoIncrementType:
   337  			idType = "AUTO_INCREMENT"
   338  			if defCaus := tblMeta.GetAutoIncrementDefCausInfo(); defCaus != nil {
   339  				defCausName = defCaus.Name.O
   340  			} else {
   341  				defCausName = perceptron.ExtraHandleName.O
   342  			}
   343  		case autoid.AutoRandomType:
   344  			idType = "AUTO_RANDOM"
   345  			defCausName = tblMeta.GetPkName().O
   346  		case autoid.SequenceType:
   347  			idType = "SEQUENCE"
   348  			defCausName = ""
   349  		default:
   350  			return autoid.ErrInvalidSlabPredictorType.GenWithStackByArgs()
   351  		}
   352  
   353  		req.AppendString(0, e.tblName.Schema.O)
   354  		req.AppendString(1, e.tblName.Name.O)
   355  		req.AppendString(2, defCausName)
   356  		req.AppendInt64(3, nextGlobalID)
   357  		req.AppendString(4, idType)
   358  	}
   359  
   360  	e.done = true
   361  	return nil
   362  }
   363  
   364  // ShowDBSInterDirc represents a show DBS interlock.
   365  type ShowDBSInterDirc struct {
   366  	baseInterlockingDirectorate
   367  
   368  	dbsTenantID string
   369  	selfID      string
   370  	dbsInfo     *admin.DBSInfo
   371  	done        bool
   372  }
   373  
   374  // Next implements the InterlockingDirectorate Next interface.
   375  func (e *ShowDBSInterDirc) Next(ctx context.Context, req *chunk.Chunk) error {
   376  	req.Reset()
   377  	if e.done {
   378  		return nil
   379  	}
   380  
   381  	dbsJobs := ""
   382  	query := ""
   383  	l := len(e.dbsInfo.Jobs)
   384  	for i, job := range e.dbsInfo.Jobs {
   385  		dbsJobs += job.String()
   386  		query += job.Query
   387  		if i != l-1 {
   388  			dbsJobs += "\n"
   389  			query += "\n"
   390  		}
   391  	}
   392  
   393  	serverInfo, err := infosync.GetServerInfoByID(ctx, e.dbsTenantID)
   394  	if err != nil {
   395  		return err
   396  	}
   397  
   398  	serverAddress := serverInfo.IP + ":" +
   399  		strconv.FormatUint(uint64(serverInfo.Port), 10)
   400  
   401  	req.AppendInt64(0, e.dbsInfo.SchemaVer)
   402  	req.AppendString(1, e.dbsTenantID)
   403  	req.AppendString(2, serverAddress)
   404  	req.AppendString(3, dbsJobs)
   405  	req.AppendString(4, e.selfID)
   406  	req.AppendString(5, query)
   407  
   408  	e.done = true
   409  	return nil
   410  }
   411  
// ShowDBSJobsInterDirc represents a show DBS jobs interlock.
   413  type ShowDBSJobsInterDirc struct {
   414  	baseInterlockingDirectorate
   415  	DBSJobRetriever
   416  
   417  	jobNumber int
   418  	is        schemareplicant.SchemaReplicant
   419  	done      bool
   420  }
   421  
// DBSJobRetriever retrieves the DBS jobs.
   423  type DBSJobRetriever struct {
   424  	runningJobs    []*perceptron.Job
   425  	historyJobIter *spacetime.LastJobIterator
   426  	cursor         int
   427  	is             schemareplicant.SchemaReplicant
   428  	activeRoles    []*auth.RoleIdentity
   429  	cacheJobs      []*perceptron.Job
   430  }
   431  
   432  func (e *DBSJobRetriever) initial(txn ekv.Transaction) error {
   433  	jobs, err := admin.GetDBSJobs(txn)
   434  	if err != nil {
   435  		return err
   436  	}
   437  	m := spacetime.NewMeta(txn)
   438  	e.historyJobIter, err = m.GetLastHistoryDBSJobsIterator()
   439  	if err != nil {
   440  		return err
   441  	}
   442  	e.runningJobs = jobs
   443  	e.cursor = 0
   444  	return nil
   445  }
   446  
   447  func (e *DBSJobRetriever) appendJobToChunk(req *chunk.Chunk, job *perceptron.Job, checker privilege.Manager) {
   448  	schemaName := job.SchemaName
   449  	blockName := ""
   450  	finishTS := uint64(0)
   451  	if job.BinlogInfo != nil {
   452  		finishTS = job.BinlogInfo.FinishedTS
   453  		if job.BinlogInfo.BlockInfo != nil {
   454  			blockName = job.BinlogInfo.BlockInfo.Name.L
   455  		}
   456  		if len(schemaName) == 0 && job.BinlogInfo.DBInfo != nil {
   457  			schemaName = job.BinlogInfo.DBInfo.Name.L
   458  		}
   459  	}
	// For compatibility: older versions of the DBS job did not store the schemaReplicant name and causet name.
   461  	if len(schemaName) == 0 {
   462  		schemaName = getSchemaName(e.is, job.SchemaID)
   463  	}
   464  	if len(blockName) == 0 {
   465  		blockName = getBlockName(e.is, job.BlockID)
   466  	}
   467  
   468  	startTime := ts2Time(job.StartTS)
   469  	finishTime := ts2Time(finishTS)
   470  
   471  	// Check the privilege.
   472  	if checker != nil && !checker.RequestVerification(e.activeRoles, strings.ToLower(schemaName), strings.ToLower(blockName), "", allegrosql.AllPrivMask) {
   473  		return
   474  	}
   475  
   476  	req.AppendInt64(0, job.ID)
   477  	req.AppendString(1, schemaName)
   478  	req.AppendString(2, blockName)
   479  	req.AppendString(3, job.Type.String())
   480  	req.AppendString(4, job.SchemaState.String())
   481  	req.AppendInt64(5, job.SchemaID)
   482  	req.AppendInt64(6, job.BlockID)
   483  	req.AppendInt64(7, job.EventCount)
   484  	req.AppendTime(8, startTime)
   485  	if finishTS > 0 {
   486  		req.AppendTime(9, finishTime)
   487  	} else {
   488  		req.AppendNull(9)
   489  	}
   490  	req.AppendString(10, job.State.String())
   491  }
   492  
   493  func ts2Time(timestamp uint64) types.Time {
   494  	duration := time.Duration(math.Pow10(9-int(types.DefaultFsp))) * time.Nanosecond
   495  	t := perceptron.TSConvert2Time(timestamp)
   496  	t.Truncate(duration)
   497  	return types.NewTime(types.FromGoTime(t), allegrosql.TypeDatetime, types.DefaultFsp)
   498  }
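
// For illustration, assuming types.DefaultFsp is 0: math.Pow10(9-0) gives a
// truncation granularity of 1e9 nanoseconds, i.e. one second, so converted
// timestamps are truncated to whole seconds; an fsp of 3 would truncate to
// milliseconds instead.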
   499  
// ShowDBSJobQueriesInterDirc represents a show DBS job queries interlock.
// The job IDs given in the 'admin show dbs job queries' memex are only
// searched among the latest 10 history jobs.
   503  type ShowDBSJobQueriesInterDirc struct {
   504  	baseInterlockingDirectorate
   505  
   506  	cursor int
   507  	jobs   []*perceptron.Job
   508  	jobIDs []int64
   509  }
   510  
   511  // Open implements the InterlockingDirectorate Open interface.
   512  func (e *ShowDBSJobQueriesInterDirc) Open(ctx context.Context) error {
   513  	if err := e.baseInterlockingDirectorate.Open(ctx); err != nil {
   514  		return err
   515  	}
   516  	txn, err := e.ctx.Txn(true)
   517  	if err != nil {
   518  		return err
   519  	}
   520  	jobs, err := admin.GetDBSJobs(txn)
   521  	if err != nil {
   522  		return err
   523  	}
   524  	historyJobs, err := admin.GetHistoryDBSJobs(txn, admin.DefNumHistoryJobs)
   525  	if err != nil {
   526  		return err
   527  	}
   528  
   529  	e.jobs = append(e.jobs, jobs...)
   530  	e.jobs = append(e.jobs, historyJobs...)
   531  
   532  	return nil
   533  }
   534  
   535  // Next implements the InterlockingDirectorate Next interface.
   536  func (e *ShowDBSJobQueriesInterDirc) Next(ctx context.Context, req *chunk.Chunk) error {
   537  	req.GrowAndReset(e.maxChunkSize)
   538  	if e.cursor >= len(e.jobs) {
   539  		return nil
   540  	}
   541  	if len(e.jobIDs) >= len(e.jobs) {
   542  		return nil
   543  	}
   544  	numCurBatch := mathutil.Min(req.Capacity(), len(e.jobs)-e.cursor)
   545  	for _, id := range e.jobIDs {
   546  		for i := e.cursor; i < e.cursor+numCurBatch; i++ {
   547  			if id == e.jobs[i].ID {
   548  				req.AppendString(0, e.jobs[i].Query)
   549  			}
   550  		}
   551  	}
   552  	e.cursor += numCurBatch
   553  	return nil
   554  }
   555  
   556  // Open implements the InterlockingDirectorate Open interface.
   557  func (e *ShowDBSJobsInterDirc) Open(ctx context.Context) error {
   558  	if err := e.baseInterlockingDirectorate.Open(ctx); err != nil {
   559  		return err
   560  	}
   561  	txn, err := e.ctx.Txn(true)
   562  	if err != nil {
   563  		return err
   564  	}
   565  	e.DBSJobRetriever.is = e.is
   566  	if e.jobNumber == 0 {
   567  		e.jobNumber = admin.DefNumHistoryJobs
   568  	}
   569  	err = e.DBSJobRetriever.initial(txn)
   570  	if err != nil {
   571  		return err
   572  	}
   573  	return nil
   574  }
   575  
   576  // Next implements the InterlockingDirectorate Next interface.
   577  func (e *ShowDBSJobsInterDirc) Next(ctx context.Context, req *chunk.Chunk) error {
   578  	req.GrowAndReset(e.maxChunkSize)
   579  	if (e.cursor - len(e.runningJobs)) >= e.jobNumber {
   580  		return nil
   581  	}
   582  	count := 0
   583  
   584  	// Append running dbs jobs.
   585  	if e.cursor < len(e.runningJobs) {
   586  		numCurBatch := mathutil.Min(req.Capacity(), len(e.runningJobs)-e.cursor)
   587  		for i := e.cursor; i < e.cursor+numCurBatch; i++ {
   588  			e.appendJobToChunk(req, e.runningJobs[i], nil)
   589  		}
   590  		e.cursor += numCurBatch
   591  		count += numCurBatch
   592  	}
   593  
   594  	// Append history dbs jobs.
   595  	var err error
   596  	if count < req.Capacity() {
   597  		num := req.Capacity() - count
   598  		remainNum := e.jobNumber - (e.cursor - len(e.runningJobs))
   599  		num = mathutil.Min(num, remainNum)
   600  		e.cacheJobs, err = e.historyJobIter.GetLastJobs(num, e.cacheJobs)
   601  		if err != nil {
   602  			return err
   603  		}
   604  		for _, job := range e.cacheJobs {
   605  			e.appendJobToChunk(req, job, nil)
   606  		}
   607  		e.cursor += len(e.cacheJobs)
   608  	}
   609  	return nil
   610  }
   611  
   612  func getSchemaName(is schemareplicant.SchemaReplicant, id int64) string {
   613  	var schemaName string
   614  	DBInfo, ok := is.SchemaByID(id)
   615  	if ok {
   616  		schemaName = DBInfo.Name.O
   617  		return schemaName
   618  	}
   619  
   620  	return schemaName
   621  }
   622  
   623  func getBlockName(is schemareplicant.SchemaReplicant, id int64) string {
   624  	var blockName string
   625  	causet, ok := is.BlockByID(id)
   626  	if ok {
   627  		blockName = causet.Meta().Name.O
   628  		return blockName
   629  	}
   630  
   631  	return blockName
   632  }
   633  
   634  // CheckBlockInterDirc represents a check causet interlock.
   635  // It is built from the "admin check causet" memex, and it checks if the
   636  // index matches the records in the causet.
   637  type CheckBlockInterDirc struct {
   638  	baseInterlockingDirectorate
   639  
   640  	dbName     string
   641  	causet     causet.Block
   642  	indexInfos []*perceptron.IndexInfo
   643  	srcs       []*IndexLookUpInterlockingDirectorate
   644  	done       bool
   645  	is         schemareplicant.SchemaReplicant
   646  	exitCh     chan struct{}
   647  	retCh      chan error
   648  	checHoTTex bool
   649  }
   650  
   651  // Open implements the InterlockingDirectorate Open interface.
   652  func (e *CheckBlockInterDirc) Open(ctx context.Context) error {
   653  	if err := e.baseInterlockingDirectorate.Open(ctx); err != nil {
   654  		return err
   655  	}
   656  	for _, src := range e.srcs {
   657  		if err := src.Open(ctx); err != nil {
   658  			return errors.Trace(err)
   659  		}
   660  	}
   661  	e.done = false
   662  	return nil
   663  }
   664  
   665  // Close implements the InterlockingDirectorate Close interface.
   666  func (e *CheckBlockInterDirc) Close() error {
   667  	var firstErr error
   668  	for _, src := range e.srcs {
   669  		if err := src.Close(); err != nil && firstErr == nil {
   670  			firstErr = err
   671  		}
   672  	}
   673  	return firstErr
   674  }
   675  
   676  func (e *CheckBlockInterDirc) checkBlockIndexHandle(ctx context.Context, idxInfo *perceptron.IndexInfo) error {
	// For a partitioned causet, there will be multiple indexLookUpReaders for the same index, one per partition.
   678  	for _, src := range e.srcs {
   679  		if src.index.Name.L == idxInfo.Name.L {
   680  			err := e.checHoTTexHandle(ctx, src)
   681  			if err != nil {
   682  				return err
   683  			}
   684  		}
   685  	}
   686  	return nil
   687  }
   688  
   689  func (e *CheckBlockInterDirc) checHoTTexHandle(ctx context.Context, src *IndexLookUpInterlockingDirectorate) error {
   690  	defcaus := src.schemaReplicant.DeferredCausets
   691  	retFieldTypes := make([]*types.FieldType, len(defcaus))
   692  	for i := range defcaus {
   693  		retFieldTypes[i] = defcaus[i].RetType
   694  	}
   695  	chk := chunk.New(retFieldTypes, e.initCap, e.maxChunkSize)
   696  
   697  	var err error
   698  	for {
   699  		err = Next(ctx, src, chk)
   700  		if err != nil {
   701  			break
   702  		}
   703  		if chk.NumEvents() == 0 {
   704  			break
   705  		}
   706  
   707  		select {
   708  		case <-e.exitCh:
   709  			return nil
   710  		default:
   711  		}
   712  	}
   713  	e.retCh <- errors.Trace(err)
   714  	return errors.Trace(err)
   715  }
   716  
   717  func (e *CheckBlockInterDirc) handlePanic(r interface{}) {
   718  	if r != nil {
   719  		e.retCh <- errors.Errorf("%v", r)
   720  	}
   721  }
   722  
   723  // Next implements the InterlockingDirectorate Next interface.
   724  func (e *CheckBlockInterDirc) Next(ctx context.Context, req *chunk.Chunk) error {
   725  	if e.done || len(e.srcs) == 0 {
   726  		return nil
   727  	}
   728  	defer func() { e.done = true }()
   729  
   730  	idxNames := make([]string, 0, len(e.indexInfos))
   731  	for _, idx := range e.indexInfos {
   732  		idxNames = append(idxNames, idx.Name.O)
   733  	}
   734  	greater, idxOffset, err := admin.ChecHoTTicesCount(e.ctx, e.dbName, e.causet.Meta().Name.O, idxNames)
   735  	if err != nil {
		// For the admin check index memex, skip the checks below for speed and compatibility.
   737  		if e.checHoTTex {
   738  			return errors.Trace(err)
   739  		}
   740  		if greater == admin.IdxCntGreater {
   741  			err = e.checkBlockIndexHandle(ctx, e.indexInfos[idxOffset])
   742  		} else if greater == admin.TblCntGreater {
   743  			err = e.checkBlockRecord(idxOffset)
   744  		}
   745  		if err != nil && admin.ErrDataInConsistent.Equal(err) {
   746  			return ErrAdminCheckBlock.GenWithStack("%v err:%v", e.causet.Meta().Name, err)
   747  		}
   748  		return errors.Trace(err)
   749  	}
   750  
	// The number of causet rows is equal to the number of index rows.
	// TODO: Make the concurrency adjustable, and consider the number of records.
   753  	concurrency := 3
   754  	wg := sync.WaitGroup{}
   755  	for i := range e.srcs {
   756  		wg.Add(1)
   757  		go func(num int) {
   758  			defer wg.Done()
   759  			soliton.WithRecovery(func() {
   760  				err1 := e.checHoTTexHandle(ctx, e.srcs[num])
   761  				if err1 != nil {
   762  					logutil.Logger(ctx).Info("check index handle failed", zap.Error(err1))
   763  				}
   764  			}, e.handlePanic)
   765  		}(i)
   766  
   767  		if (i+1)%concurrency == 0 {
   768  			wg.Wait()
   769  		}
   770  	}
   771  
   772  	for i := 0; i < len(e.srcs); i++ {
   773  		err = <-e.retCh
   774  		if err != nil {
   775  			return errors.Trace(err)
   776  		}
   777  	}
   778  	return nil
   779  }
   780  
   781  func (e *CheckBlockInterDirc) checkBlockRecord(idxOffset int) error {
   782  	idxInfo := e.indexInfos[idxOffset]
   783  	txn, err := e.ctx.Txn(true)
   784  	if err != nil {
   785  		return err
   786  	}
   787  	if e.causet.Meta().GetPartitionInfo() == nil {
   788  		idx := blocks.NewIndex(e.causet.Meta().ID, e.causet.Meta(), idxInfo)
   789  		return admin.CheckRecordAndIndex(e.ctx, txn, e.causet, idx)
   790  	}
   791  
   792  	info := e.causet.Meta().GetPartitionInfo()
   793  	for _, def := range info.Definitions {
   794  		pid := def.ID
   795  		partition := e.causet.(causet.PartitionedBlock).GetPartition(pid)
   796  		idx := blocks.NewIndex(def.ID, e.causet.Meta(), idxInfo)
   797  		if err := admin.CheckRecordAndIndex(e.ctx, txn, partition, idx); err != nil {
   798  			return errors.Trace(err)
   799  		}
   800  	}
   801  	return nil
   802  }
   803  
   804  // ShowSlowInterDirc represents the interlock of showing the slow queries.
// It is built from the "admin show slow" memex:
   806  //	admin show slow top [internal | all] N
   807  //	admin show slow recent N
   808  type ShowSlowInterDirc struct {
   809  	baseInterlockingDirectorate
   810  
   811  	ShowSlow *ast.ShowSlow
   812  	result   []*petri.SlowQueryInfo
   813  	cursor   int
   814  }
   815  
   816  // Open implements the InterlockingDirectorate Open interface.
   817  func (e *ShowSlowInterDirc) Open(ctx context.Context) error {
   818  	if err := e.baseInterlockingDirectorate.Open(ctx); err != nil {
   819  		return err
   820  	}
   821  
   822  	dom := petri.GetPetri(e.ctx)
   823  	e.result = dom.ShowSlowQuery(e.ShowSlow)
   824  	return nil
   825  }
   826  
   827  // Next implements the InterlockingDirectorate Next interface.
   828  func (e *ShowSlowInterDirc) Next(ctx context.Context, req *chunk.Chunk) error {
   829  	req.Reset()
   830  	if e.cursor >= len(e.result) {
   831  		return nil
   832  	}
   833  
   834  	for e.cursor < len(e.result) && req.NumEvents() < e.maxChunkSize {
   835  		slow := e.result[e.cursor]
   836  		req.AppendString(0, slow.ALLEGROALLEGROSQL)
   837  		req.AppendTime(1, types.NewTime(types.FromGoTime(slow.Start), allegrosql.TypeTimestamp, types.MaxFsp))
   838  		req.AppendDuration(2, types.Duration{Duration: slow.Duration, Fsp: types.MaxFsp})
   839  		req.AppendString(3, slow.Detail.String())
   840  		if slow.Succ {
   841  			req.AppendInt64(4, 1)
   842  		} else {
   843  			req.AppendInt64(4, 0)
   844  		}
   845  		req.AppendUint64(5, slow.ConnID)
   846  		req.AppendUint64(6, slow.TxnTS)
   847  		req.AppendString(7, slow.User)
   848  		req.AppendString(8, slow.EDB)
   849  		req.AppendString(9, slow.BlockIDs)
   850  		req.AppendString(10, slow.IndexNames)
   851  		if slow.Internal {
   852  			req.AppendInt64(11, 1)
   853  		} else {
   854  			req.AppendInt64(11, 0)
   855  		}
   856  		req.AppendString(12, slow.Digest)
   857  		e.cursor++
   858  	}
   859  	return nil
   860  }
   861  
// SelectLockInterDirc represents a select lock interlock.
// It is built from the "SELECT .. FOR UFIDelATE" or the "SELECT .. LOCK IN SHARE MODE" memex.
// For the "SELECT .. FOR UFIDelATE" memex, it locks every event key from the source InterlockingDirectorate.
// After the execution, the keys are buffered in the transaction, and will be sent to the KV store
// when committing. If any key is already locked by another transaction,
// the transaction will roll back and retry.
   868  type SelectLockInterDirc struct {
   869  	baseInterlockingDirectorate
   870  
   871  	Lock *ast.SelectLockInfo
   872  	keys []ekv.Key
   873  
   874  	tblID2Handle     map[int64][]causetembedded.HandleDefCauss
   875  	partitionedBlock []causet.PartitionedBlock
   876  
   877  	// tblID2Block is cached to reduce cost.
   878  	tblID2Block map[int64]causet.PartitionedBlock
   879  }
   880  
   881  // Open implements the InterlockingDirectorate Open interface.
   882  func (e *SelectLockInterDirc) Open(ctx context.Context) error {
   883  	if err := e.baseInterlockingDirectorate.Open(ctx); err != nil {
   884  		return err
   885  	}
   886  
   887  	if len(e.tblID2Handle) > 0 && len(e.partitionedBlock) > 0 {
   888  		e.tblID2Block = make(map[int64]causet.PartitionedBlock, len(e.partitionedBlock))
   889  		for id := range e.tblID2Handle {
   890  			for _, p := range e.partitionedBlock {
   891  				if id == p.Meta().ID {
   892  					e.tblID2Block[id] = p
   893  				}
   894  			}
   895  		}
   896  	}
   897  
   898  	return nil
   899  }
   900  
   901  // Next implements the InterlockingDirectorate Next interface.
   902  func (e *SelectLockInterDirc) Next(ctx context.Context, req *chunk.Chunk) error {
   903  	req.GrowAndReset(e.maxChunkSize)
   904  	err := Next(ctx, e.children[0], req)
   905  	if err != nil {
   906  		return err
   907  	}
   908  	// If there's no handle or it's not a `SELECT FOR UFIDelATE` memex.
   909  	if len(e.tblID2Handle) == 0 || (!causetembedded.IsSelectForUFIDelateLockType(e.Lock.LockType)) {
   910  		return nil
   911  	}
   912  
   913  	if req.NumEvents() > 0 {
   914  		iter := chunk.NewIterator4Chunk(req)
   915  		for event := iter.Begin(); event != iter.End(); event = iter.Next() {
   916  			for id, defcaus := range e.tblID2Handle {
   917  				physicalID := id
   918  				if pt, ok := e.tblID2Block[id]; ok {
					// On a partitioned causet, we have to use the physical ID to encode the lock key!
   920  					p, err := pt.GetPartitionByEvent(e.ctx, event.GetCausetEvent(e.base().retFieldTypes))
   921  					if err != nil {
   922  						return err
   923  					}
   924  					physicalID = p.GetPhysicalID()
   925  				}
   926  
   927  				for _, defCaus := range defcaus {
   928  					handle, err := defCaus.BuildHandle(event)
   929  					if err != nil {
   930  						return err
   931  					}
   932  					e.keys = append(e.keys, blockcodec.EncodeEventKeyWithHandle(physicalID, handle))
   933  				}
   934  			}
   935  		}
   936  		return nil
   937  	}
   938  	lockWaitTime := e.ctx.GetStochastikVars().LockWaitTimeout
   939  	if e.Lock.LockType == ast.SelectLockForUFIDelateNoWait {
   940  		lockWaitTime = ekv.LockNoWait
   941  	} else if e.Lock.LockType == ast.SelectLockForUFIDelateWaitN {
   942  		lockWaitTime = int64(e.Lock.WaitSec) * 1000
   943  	}
   944  
   945  	return doLockKeys(ctx, e.ctx, newLockCtx(e.ctx.GetStochastikVars(), lockWaitTime), e.keys...)
   946  }
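
// To illustrate the mapping above: "SELECT .. FOR UFIDelATE NOWAIT" resolves
// lockWaitTime to ekv.LockNoWait, while "SELECT .. FOR UFIDelATE WAIT 5"
// (WaitSec = 5) resolves it to 5 * 1000 = 5000 milliseconds.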
   947  
   948  func newLockCtx(seVars *variable.StochastikVars, lockWaitTime int64) *ekv.LockCtx {
   949  	return &ekv.LockCtx{
   950  		Killed:                &seVars.Killed,
   951  		ForUFIDelateTS:        seVars.TxnCtx.GetForUFIDelateTS(),
   952  		LockWaitTime:          lockWaitTime,
   953  		WaitStartTime:         seVars.StmtCtx.GetLockWaitStartTime(),
   954  		PessimisticLockWaited: &seVars.StmtCtx.PessimisticLockWaited,
   955  		LockKeysDuration:      &seVars.StmtCtx.LockKeysDuration,
   956  		LockKeysCount:         &seVars.StmtCtx.LockKeysCount,
   957  		LockExpired:           &seVars.TxnCtx.LockExpire,
   958  	}
   959  }
   960  
// doLockKeys is the main entry point for locking keys pessimistically.
// waitTime is how long the lock operation will wait, in milliseconds, if the
// target key is already locked by another transaction; it is used for the
// "select for uFIDelate nowait" situation. Two values are special: 0 means
// always wait, and 1 means nowait.
   965  func doLockKeys(ctx context.Context, se stochastikctx.Context, lockCtx *ekv.LockCtx, keys ...ekv.Key) error {
   966  	sctx := se.GetStochastikVars().StmtCtx
   967  	if !sctx.InUFIDelateStmt && !sctx.InDeleteStmt {
   968  		atomic.StoreUint32(&se.GetStochastikVars().TxnCtx.ForUFIDelate, 1)
   969  	}
   970  	// Lock keys only once when finished fetching all results.
   971  	txn, err := se.Txn(true)
   972  	if err != nil {
   973  		return err
   974  	}
   975  	var lockKeyStats *execdetails.LockKeysDetails
   976  	ctx = context.WithValue(ctx, execdetails.LockKeysDetailCtxKey, &lockKeyStats)
   977  	err = txn.LockKeys(stochastikctx.SetCommitCtx(ctx, se), lockCtx, keys...)
   978  	if lockKeyStats != nil {
   979  		sctx.MergeLockKeysInterDircDetails(lockKeyStats)
   980  	}
   981  	return err
   982  }
   983  
// LimitInterDirc represents a limit interlock.
// It skips the first 'Offset' rows from its child, then returns at most 'Count' rows.
   986  type LimitInterDirc struct {
   987  	baseInterlockingDirectorate
   988  
   989  	begin  uint64
   990  	end    uint64
   991  	cursor uint64
   992  
   993  	// meetFirstBatch represents whether we have met the first valid Chunk from child.
   994  	meetFirstBatch bool
   995  
   996  	childResult *chunk.Chunk
   997  }
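
// A worked example (assuming the causet builder sets begin to the memex's
// OFFSET and end to OFFSET+COUNT): for "LIMIT 5, 10", begin is 5 and end is
// 15, so the first 5 child rows are skipped and at most 10 rows are returned
// before cursor reaches end.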
   998  
   999  // Next implements the InterlockingDirectorate Next interface.
  1000  func (e *LimitInterDirc) Next(ctx context.Context, req *chunk.Chunk) error {
  1001  	req.Reset()
  1002  	if e.cursor >= e.end {
  1003  		return nil
  1004  	}
  1005  	for !e.meetFirstBatch {
  1006  		// transfer req's requiredEvents to childResult and then adjust it in childResult
  1007  		e.childResult = e.childResult.SetRequiredEvents(req.RequiredEvents(), e.maxChunkSize)
  1008  		err := Next(ctx, e.children[0], e.adjustRequiredEvents(e.childResult))
  1009  		if err != nil {
  1010  			return err
  1011  		}
  1012  		batchSize := uint64(e.childResult.NumEvents())
  1013  		// no more data.
  1014  		if batchSize == 0 {
  1015  			return nil
  1016  		}
  1017  		if newCursor := e.cursor + batchSize; newCursor >= e.begin {
  1018  			e.meetFirstBatch = true
  1019  			begin, end := e.begin-e.cursor, batchSize
  1020  			if newCursor > e.end {
  1021  				end = e.end - e.cursor
  1022  			}
  1023  			e.cursor += end
  1024  			if begin == end {
  1025  				break
  1026  			}
  1027  			req.Append(e.childResult, int(begin), int(end))
  1028  			return nil
  1029  		}
  1030  		e.cursor += batchSize
  1031  	}
  1032  	e.adjustRequiredEvents(req)
  1033  	err := Next(ctx, e.children[0], req)
  1034  	if err != nil {
  1035  		return err
  1036  	}
  1037  	batchSize := uint64(req.NumEvents())
  1038  	// no more data.
  1039  	if batchSize == 0 {
  1040  		return nil
  1041  	}
  1042  	if e.cursor+batchSize > e.end {
  1043  		req.TruncateTo(int(e.end - e.cursor))
  1044  		batchSize = e.end - e.cursor
  1045  	}
  1046  	e.cursor += batchSize
  1047  	return nil
  1048  }
  1049  
  1050  // Open implements the InterlockingDirectorate Open interface.
  1051  func (e *LimitInterDirc) Open(ctx context.Context) error {
  1052  	if err := e.baseInterlockingDirectorate.Open(ctx); err != nil {
  1053  		return err
  1054  	}
  1055  	e.childResult = newFirstChunk(e.children[0])
  1056  	e.cursor = 0
  1057  	e.meetFirstBatch = e.begin == 0
  1058  	return nil
  1059  }
  1060  
  1061  // Close implements the InterlockingDirectorate Close interface.
  1062  func (e *LimitInterDirc) Close() error {
  1063  	e.childResult = nil
  1064  	return e.baseInterlockingDirectorate.Close()
  1065  }
  1066  
  1067  func (e *LimitInterDirc) adjustRequiredEvents(chk *chunk.Chunk) *chunk.Chunk {
	// limitTotal is the maximum number of rows the LimitInterDirc should still read.
	limitTotal := int(e.end - e.cursor)
  1070  
  1071  	var limitRequired int
	if e.cursor < e.begin {
		// If cursor is less than begin, we have to read (begin-cursor) rows to skip,
		// and then read chk.RequiredEvents() rows to return,
		// so the limit is (begin-cursor)+chk.RequiredEvents().
		limitRequired = int(e.begin) - int(e.cursor) + chk.RequiredEvents()
	} else {
		// If cursor is greater than or equal to begin, just read chk.RequiredEvents() rows to return.
		limitRequired = chk.RequiredEvents()
	}
  1080  	}
  1081  
  1082  	return chk.SetRequiredEvents(mathutil.Min(limitTotal, limitRequired), e.maxChunkSize)
  1083  }
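
// A numeric illustration (values invented for the example): with begin=10,
// end=15, cursor=8 and chk.RequiredEvents()=32, limitTotal is 15-8=7 and
// limitRequired is (10-8)+32=34, so the child chunk is capped at
// min(7, 34) = 7 rows: 2 to skip and at most 5 to return.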
  1084  
  1085  func init() {
	// While doing optimization in the plan package, we need to execute uncorrelated subqueries,
  1087  	// but the plan package cannot import the interlock package because of the dependency cycle.
  1088  	// So we assign a function implemented in the interlock package to the plan package to avoid the dependency cycle.
  1089  	causetembedded.EvalSubqueryFirstEvent = func(ctx context.Context, p causetembedded.PhysicalCauset, is schemareplicant.SchemaReplicant, sctx stochastikctx.Context) ([]types.Causet, error) {
  1090  		defer func(begin time.Time) {
  1091  			s := sctx.GetStochastikVars()
  1092  			s.RewritePhaseInfo.PreprocessSubQueries++
  1093  			s.RewritePhaseInfo.DurationPreprocessSubQuery += time.Since(begin)
  1094  		}(time.Now())
  1095  
  1096  		if span := opentracing.SpanFromContext(ctx); span != nil && span.Tracer() != nil {
  1097  			span1 := span.Tracer().StartSpan("interlock.EvalSubQuery", opentracing.ChildOf(span.Context()))
  1098  			defer span1.Finish()
  1099  			ctx = opentracing.ContextWithSpan(ctx, span1)
  1100  		}
  1101  
  1102  		e := &interlockBuilder{is: is, ctx: sctx}
  1103  		exec := e.build(p)
  1104  		if e.err != nil {
  1105  			return nil, e.err
  1106  		}
  1107  		err := exec.Open(ctx)
  1108  		defer terror.Call(exec.Close)
  1109  		if err != nil {
  1110  			return nil, err
  1111  		}
  1112  		chk := newFirstChunk(exec)
  1113  		for {
  1114  			err = Next(ctx, exec, chk)
  1115  			if err != nil {
  1116  				return nil, err
  1117  			}
  1118  			if chk.NumEvents() == 0 {
  1119  				return nil, nil
  1120  			}
  1121  			event := chk.GetEvent(0).GetCausetEvent(retTypes(exec))
  1122  			return event, err
  1123  		}
  1124  	}
  1125  }
  1126  
  1127  // BlockDualInterDirc represents a dual causet interlock.
  1128  type BlockDualInterDirc struct {
  1129  	baseInterlockingDirectorate
  1130  
  1131  	// numDualEvents can only be 0 or 1.
  1132  	numDualEvents int
  1133  	numReturned   int
  1134  }
  1135  
  1136  // Open implements the InterlockingDirectorate Open interface.
  1137  func (e *BlockDualInterDirc) Open(ctx context.Context) error {
  1138  	e.numReturned = 0
  1139  	return nil
  1140  }
  1141  
  1142  // Next implements the InterlockingDirectorate Next interface.
  1143  func (e *BlockDualInterDirc) Next(ctx context.Context, req *chunk.Chunk) error {
  1144  	req.Reset()
  1145  	if e.numReturned >= e.numDualEvents {
  1146  		return nil
  1147  	}
  1148  	if e.Schema().Len() == 0 {
  1149  		req.SetNumVirtualEvents(1)
  1150  	} else {
  1151  		for i := range e.Schema().DeferredCausets {
  1152  			req.AppendNull(i)
  1153  		}
  1154  	}
  1155  	e.numReturned = e.numDualEvents
  1156  	return nil
  1157  }
  1158  
  1159  // SelectionInterDirc represents a filter interlock.
  1160  type SelectionInterDirc struct {
  1161  	baseInterlockingDirectorate
  1162  
  1163  	batched     bool
  1164  	filters     []memex.Expression
  1165  	selected    []bool
  1166  	inputIter   *chunk.Iterator4Chunk
  1167  	inputEvent  chunk.Event
  1168  	childResult *chunk.Chunk
  1169  
  1170  	memTracker *memory.Tracker
  1171  }
  1172  
  1173  // Open implements the InterlockingDirectorate Open interface.
  1174  func (e *SelectionInterDirc) Open(ctx context.Context) error {
  1175  	if err := e.baseInterlockingDirectorate.Open(ctx); err != nil {
  1176  		return err
  1177  	}
  1178  	return e.open(ctx)
  1179  }
  1180  
  1181  func (e *SelectionInterDirc) open(ctx context.Context) error {
  1182  	e.memTracker = memory.NewTracker(e.id, -1)
  1183  	e.memTracker.AttachTo(e.ctx.GetStochastikVars().StmtCtx.MemTracker)
  1184  	e.childResult = newFirstChunk(e.children[0])
  1185  	e.memTracker.Consume(e.childResult.MemoryUsage())
  1186  	e.batched = memex.Vectorizable(e.filters)
  1187  	if e.batched {
  1188  		e.selected = make([]bool, 0, chunk.InitialCapacity)
  1189  	}
  1190  	e.inputIter = chunk.NewIterator4Chunk(e.childResult)
  1191  	e.inputEvent = e.inputIter.End()
  1192  	return nil
  1193  }
  1194  
  1195  // Close implements causetembedded.Causet Close interface.
  1196  func (e *SelectionInterDirc) Close() error {
  1197  	e.memTracker.Consume(-e.childResult.MemoryUsage())
  1198  	e.childResult = nil
  1199  	e.selected = nil
  1200  	return e.baseInterlockingDirectorate.Close()
  1201  }
  1202  
  1203  // Next implements the InterlockingDirectorate Next interface.
  1204  func (e *SelectionInterDirc) Next(ctx context.Context, req *chunk.Chunk) error {
  1205  	req.GrowAndReset(e.maxChunkSize)
  1206  
  1207  	if !e.batched {
  1208  		return e.unBatchedNext(ctx, req)
  1209  	}
  1210  
  1211  	for {
  1212  		for ; e.inputEvent != e.inputIter.End(); e.inputEvent = e.inputIter.Next() {
  1213  			if !e.selected[e.inputEvent.Idx()] {
  1214  				continue
  1215  			}
  1216  			if req.IsFull() {
  1217  				return nil
  1218  			}
  1219  			req.AppendEvent(e.inputEvent)
  1220  		}
  1221  		mSize := e.childResult.MemoryUsage()
  1222  		err := Next(ctx, e.children[0], e.childResult)
  1223  		e.memTracker.Consume(e.childResult.MemoryUsage() - mSize)
  1224  		if err != nil {
  1225  			return err
  1226  		}
  1227  		// no more data.
  1228  		if e.childResult.NumEvents() == 0 {
  1229  			return nil
  1230  		}
  1231  		e.selected, err = memex.VectorizedFilter(e.ctx, e.filters, e.inputIter, e.selected)
  1232  		if err != nil {
  1233  			return err
  1234  		}
  1235  		e.inputEvent = e.inputIter.Begin()
  1236  	}
  1237  }
  1238  
  1239  // unBatchedNext filters input rows one by one and returns once an input event is selected.
  1240  // For allegrosql with "SETVAR" in filter and "GETVAR" in projection, for example: "SELECT @a FROM t WHERE (@a := 2) > 0",
  1241  // we have to set batch size to 1 to do the evaluation of filter and projection.
  1242  func (e *SelectionInterDirc) unBatchedNext(ctx context.Context, chk *chunk.Chunk) error {
  1243  	for {
  1244  		for ; e.inputEvent != e.inputIter.End(); e.inputEvent = e.inputIter.Next() {
  1245  			selected, _, err := memex.EvalBool(e.ctx, e.filters, e.inputEvent)
  1246  			if err != nil {
  1247  				return err
  1248  			}
  1249  			if selected {
  1250  				chk.AppendEvent(e.inputEvent)
  1251  				e.inputEvent = e.inputIter.Next()
  1252  				return nil
  1253  			}
  1254  		}
  1255  		mSize := e.childResult.MemoryUsage()
  1256  		err := Next(ctx, e.children[0], e.childResult)
  1257  		e.memTracker.Consume(e.childResult.MemoryUsage() - mSize)
  1258  		if err != nil {
  1259  			return err
  1260  		}
  1261  		e.inputEvent = e.inputIter.Begin()
  1262  		// no more data.
  1263  		if e.childResult.NumEvents() == 0 {
  1264  			return nil
  1265  		}
  1266  	}
  1267  }
  1268  
  1269  // BlockScanInterDirc is a causet scan interlock without result fields.
  1270  type BlockScanInterDirc struct {
  1271  	baseInterlockingDirectorate
  1272  
  1273  	t                     causet.Block
  1274  	defCausumns           []*perceptron.DeferredCausetInfo
  1275  	virtualBlockChunkList *chunk.List
  1276  	virtualBlockChunkIdx  int
  1277  }
  1278  
  1279  // Next implements the InterlockingDirectorate Next interface.
  1280  func (e *BlockScanInterDirc) Next(ctx context.Context, req *chunk.Chunk) error {
  1281  	req.GrowAndReset(e.maxChunkSize)
  1282  	return e.nextChunk4SchemaReplicant(ctx, req)
  1283  }
  1284  
  1285  func (e *BlockScanInterDirc) nextChunk4SchemaReplicant(ctx context.Context, chk *chunk.Chunk) error {
  1286  	chk.GrowAndReset(e.maxChunkSize)
  1287  	if e.virtualBlockChunkList == nil {
  1288  		e.virtualBlockChunkList = chunk.NewList(retTypes(e), e.initCap, e.maxChunkSize)
  1289  		defCausumns := make([]*causet.DeferredCauset, e.schemaReplicant.Len())
  1290  		for i, defCausInfo := range e.defCausumns {
  1291  			defCausumns[i] = causet.ToDeferredCauset(defCausInfo)
  1292  		}
  1293  		mublockEvent := chunk.MutEventFromTypes(retTypes(e))
  1294  		err := e.t.IterRecords(e.ctx, nil, defCausumns, func(_ ekv.Handle, rec []types.Causet, defcaus []*causet.DeferredCauset) (bool, error) {
  1295  			mublockEvent.SetCausets(rec...)
  1296  			e.virtualBlockChunkList.AppendEvent(mublockEvent.ToEvent())
  1297  			return true, nil
  1298  		})
  1299  		if err != nil {
  1300  			return err
  1301  		}
  1302  	}
  1303  	// no more data.
  1304  	if e.virtualBlockChunkIdx >= e.virtualBlockChunkList.NumChunks() {
  1305  		return nil
  1306  	}
  1307  	virtualBlockChunk := e.virtualBlockChunkList.GetChunk(e.virtualBlockChunkIdx)
  1308  	e.virtualBlockChunkIdx++
  1309  	chk.SwapDeferredCausets(virtualBlockChunk)
  1310  	return nil
  1311  }
  1312  
  1313  // Open implements the InterlockingDirectorate Open interface.
  1314  func (e *BlockScanInterDirc) Open(ctx context.Context) error {
  1315  	e.virtualBlockChunkList = nil
  1316  	return nil
  1317  }
  1318  
// MaxOneEventInterDirc checks that the number of rows a query returns is at most one.
// It's built from a subquery memex.
  1321  type MaxOneEventInterDirc struct {
  1322  	baseInterlockingDirectorate
  1323  
  1324  	evaluated bool
  1325  }
  1326  
  1327  // Open implements the InterlockingDirectorate Open interface.
  1328  func (e *MaxOneEventInterDirc) Open(ctx context.Context) error {
  1329  	if err := e.baseInterlockingDirectorate.Open(ctx); err != nil {
  1330  		return err
  1331  	}
  1332  	e.evaluated = false
  1333  	return nil
  1334  }
  1335  
  1336  // Next implements the InterlockingDirectorate Next interface.
  1337  func (e *MaxOneEventInterDirc) Next(ctx context.Context, req *chunk.Chunk) error {
  1338  	req.Reset()
  1339  	if e.evaluated {
  1340  		return nil
  1341  	}
  1342  	e.evaluated = true
  1343  	err := Next(ctx, e.children[0], req)
  1344  	if err != nil {
  1345  		return err
  1346  	}
  1347  
  1348  	if num := req.NumEvents(); num == 0 {
  1349  		for i := range e.schemaReplicant.DeferredCausets {
  1350  			req.AppendNull(i)
  1351  		}
  1352  		return nil
  1353  	} else if num != 1 {
  1354  		return errors.New("subquery returns more than 1 event")
  1355  	}
  1356  
  1357  	childChunk := newFirstChunk(e.children[0])
  1358  	err = Next(ctx, e.children[0], childChunk)
  1359  	if err != nil {
  1360  		return err
  1361  	}
  1362  	if childChunk.NumEvents() != 0 {
  1363  		return errors.New("subquery returns more than 1 event")
  1364  	}
  1365  
  1366  	return nil
  1367  }
  1368  
// UnionInterDirc pulls all its children's results and returns them to its parent directly.
// A "resultPuller" is started for every child to pull results from that child and push them to the "resultPool";
// the "Chunk" it uses is obtained from the corresponding "resourcePool". All resultPullers run concurrently.
  1372  //                             +----------------+
  1373  //   +---> resourcePool 1 ---> | resultPuller 1 |-----+
  1374  //   |                         +----------------+     |
  1375  //   |                                                |
  1376  //   |                         +----------------+     v
  1377  //   +---> resourcePool 2 ---> | resultPuller 2 |-----> resultPool ---+
  1378  //   |                         +----------------+     ^               |
  1379  //   |                               ......           |               |
  1380  //   |                         +----------------+     |               |
  1381  //   +---> resourcePool n ---> | resultPuller n |-----+               |
  1382  //   |                         +----------------+                     |
  1383  //   |                                                                |
  1384  //   |                          +-------------+                       |
  1385  //   |--------------------------| main thread | <---------------------+
  1386  //                              +-------------+
  1387  type UnionInterDirc struct {
  1388  	baseInterlockingDirectorate
  1389  	concurrency int
  1390  	childIDChan chan int
  1391  
  1392  	stopFetchData atomic.Value
  1393  
  1394  	finished      chan struct{}
  1395  	resourcePools []chan *chunk.Chunk
  1396  	resultPool    chan *unionWorkerResult
  1397  
  1398  	results     []*chunk.Chunk
  1399  	wg          sync.WaitGroup
  1400  	initialized bool
  1401  }
  1402  
// unionWorkerResult stores the result for a union worker.
// A "resultPuller" is started for every child to pull results from that child; unionWorkerResult is used to store one pulled result.
// "src" is used for Chunk reuse: after pulling a result from "resultPool", the main thread must push a valid unused Chunk back to
// "src" to enable the corresponding "resultPuller" to continue working.
  1407  type unionWorkerResult struct {
  1408  	chk *chunk.Chunk
  1409  	err error
  1410  	src chan<- *chunk.Chunk
  1411  }
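
// The Chunk-recycling handshake, seen from the main thread (a condensed view
// of UnionInterDirc.Next below):
//
//	result := <-e.resultPool       // take a filled chunk from a puller
//	req.SwapDeferredCausets(result.chk)
//	result.src <- result.chk       // hand the chunk back for reuse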
  1412  
  1413  func (e *UnionInterDirc) waitAllFinished() {
  1414  	e.wg.Wait()
  1415  	close(e.resultPool)
  1416  }
  1417  
  1418  // Open implements the InterlockingDirectorate Open interface.
  1419  func (e *UnionInterDirc) Open(ctx context.Context) error {
  1420  	if err := e.baseInterlockingDirectorate.Open(ctx); err != nil {
  1421  		return err
  1422  	}
  1423  	e.stopFetchData.CausetStore(false)
  1424  	e.initialized = false
  1425  	e.finished = make(chan struct{})
  1426  	return nil
  1427  }
  1428  
  1429  func (e *UnionInterDirc) initialize(ctx context.Context) {
  1430  	if e.concurrency > len(e.children) {
  1431  		e.concurrency = len(e.children)
  1432  	}
  1433  	for i := 0; i < e.concurrency; i++ {
  1434  		e.results = append(e.results, newFirstChunk(e.children[0]))
  1435  	}
  1436  	e.resultPool = make(chan *unionWorkerResult, e.concurrency)
  1437  	e.resourcePools = make([]chan *chunk.Chunk, e.concurrency)
  1438  	e.childIDChan = make(chan int, len(e.children))
  1439  	for i := 0; i < e.concurrency; i++ {
  1440  		e.resourcePools[i] = make(chan *chunk.Chunk, 1)
  1441  		e.resourcePools[i] <- e.results[i]
  1442  		e.wg.Add(1)
  1443  		go e.resultPuller(ctx, i)
  1444  	}
  1445  	for i := 0; i < len(e.children); i++ {
  1446  		e.childIDChan <- i
  1447  	}
  1448  	close(e.childIDChan)
  1449  	go e.waitAllFinished()
  1450  }
  1451  
  1452  func (e *UnionInterDirc) resultPuller(ctx context.Context, workerID int) {
  1453  	result := &unionWorkerResult{
  1454  		err: nil,
  1455  		chk: nil,
  1456  		src: e.resourcePools[workerID],
  1457  	}
  1458  	defer func() {
  1459  		if r := recover(); r != nil {
  1460  			buf := make([]byte, 4096)
  1461  			stackSize := runtime.Stack(buf, false)
  1462  			buf = buf[:stackSize]
  1463  			logutil.Logger(ctx).Error("resultPuller panicked", zap.String("stack", string(buf)))
  1464  			result.err = errors.Errorf("%v", r)
  1465  			e.resultPool <- result
  1466  			e.stopFetchData.CausetStore(true)
  1467  		}
  1468  		e.wg.Done()
  1469  	}()
  1470  	for childID := range e.childIDChan {
  1471  		for {
  1472  			if e.stopFetchData.Load().(bool) {
  1473  				return
  1474  			}
  1475  			select {
  1476  			case <-e.finished:
  1477  				return
  1478  			case result.chk = <-e.resourcePools[workerID]:
  1479  			}
  1480  			result.err = Next(ctx, e.children[childID], result.chk)
  1481  			if result.err == nil && result.chk.NumEvents() == 0 {
  1482  				e.resourcePools[workerID] <- result.chk
  1483  				break
  1484  			}
  1485  			e.resultPool <- result
  1486  			if result.err != nil {
  1487  				e.stopFetchData.CausetStore(true)
  1488  				return
  1489  			}
  1490  		}
  1491  	}
  1492  }
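
        // A zero-event chunk from Next means the current child is exhausted: the
        // worker returns the chunk to its pool and moves on to the next child ID.
        // On error the worker publishes the error through resultPool and flips
        // stopFetchData so that sibling workers stop pulling as well.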
  1493  
  1494  // Next implements the InterlockingDirectorate Next interface.
  1495  func (e *UnionInterDirc) Next(ctx context.Context, req *chunk.Chunk) error {
  1496  	req.GrowAndReset(e.maxChunkSize)
  1497  	if !e.initialized {
  1498  		e.initialize(ctx)
  1499  		e.initialized = true
  1500  	}
  1501  	result, ok := <-e.resultPool
  1502  	if !ok {
  1503  		return nil
  1504  	}
  1505  	if result.err != nil {
  1506  		return errors.Trace(result.err)
  1507  	}
  1508  
  1509  	req.SwapDeferredCausets(result.chk)
  1510  	result.src <- result.chk
  1511  	return nil
  1512  }
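
        // A closed resultPool (ok == false) signals a normal end of data:
        // waitAllFinished closes it only after every resultPuller has finished,
        // so returning with an empty req is the regular end-of-union path.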
  1513  
  1514  // Close implements the InterlockingDirectorate Close interface.
  1515  func (e *UnionInterDirc) Close() error {
  1516  	if e.finished != nil {
  1517  		close(e.finished)
  1518  	}
  1519  	e.results = nil
  1520  	if e.resultPool != nil {
  1521  		for range e.resultPool {
  1522  		}
  1523  	}
  1524  	e.resourcePools = nil
  1525  	if e.childIDChan != nil {
  1526  		for range e.childIDChan {
  1527  		}
  1528  	}
  1529  	return e.baseInterlockingDirectorate.Close()
  1530  }
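
        // Close drains resultPool and childIDChan instead of merely dropping them:
        // draining unblocks any resultPuller parked on a send, which in turn lets
        // waitAllFinished close resultPool so the drain loops terminate.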
  1531  
  1532  // ResetContextOfStmt resets the StmtContext and stochastik variables.
  1533  // Before every execution, we must clear the statement context.
  1534  func ResetContextOfStmt(ctx stochastikctx.Context, s ast.StmtNode) (err error) {
  1535  	vars := ctx.GetStochastikVars()
  1536  	sc := &stmtctx.StatementContext{
  1537  		TimeZone:    vars.Location(),
  1538  		MemTracker:  memory.NewTracker(memory.LabelForALLEGROSQLText, vars.MemQuotaQuery),
  1539  		DiskTracker: disk.NewTracker(memory.LabelForALLEGROSQLText, -1),
  1540  		TaskID:      stmtctx.AllocateTaskID(),
  1541  	}
  1542  	sc.MemTracker.AttachToGlobalTracker(GlobalMemoryUsageTracker)
  1543  	globalConfig := config.GetGlobalConfig()
  1544  	if globalConfig.OOMUseTmpStorage && GlobalDiskUsageTracker != nil {
  1545  		sc.DiskTracker.AttachToGlobalTracker(GlobalDiskUsageTracker)
  1546  	}
  1547  	switch globalConfig.OOMCausetAction {
  1548  	case config.OOMCausetActionCancel:
  1549  		action := &memory.PanicOnExceed{ConnID: ctx.GetStochastikVars().ConnectionID}
  1550  		action.SetLogHook(petri.GetPetri(ctx).ExpensiveQueryHandle().LogOnQueryExceedMemQuota)
  1551  		sc.MemTracker.SetSuperCowOrNoCausetOnExceed(action)
  1552  	case config.OOMCausetActionLog:
  1553  		fallthrough
  1554  	default:
  1555  		action := &memory.RepLogCausetOnExceed{ConnID: ctx.GetStochastikVars().ConnectionID}
  1556  		action.SetLogHook(petri.GetPetri(ctx).ExpensiveQueryHandle().LogOnQueryExceedMemQuota)
  1557  		sc.MemTracker.SetSuperCowOrNoCausetOnExceed(action)
  1558  	}
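
        	// With OOMCausetAction = "cancel", a query whose memory usage exceeds its
        	// quota is aborted through PanicOnExceed; with "log" (and by default) it is
        	// only reported via the expensive-query log hook. A hypothetical config
        	// fragment, shown purely for illustration:
        	//
        	//	oom-action = "cancel"
        	//	mem-quota-query = 1073741824  # 1 GiB per query
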
  1559  	if execStmt, ok := s.(*ast.InterDircuteStmt); ok {
  1560  		s, err = causet.GetPreparedStmt(execStmt, vars)
  1561  		if err != nil {
  1562  			return
  1563  		}
  1564  	}
  1565  	// An EXECUTE that misses its stmtID uses an empty allegrosql text.
  1566  	sc.OriginalALLEGROSQL = s.Text()
  1567  	if explainStmt, ok := s.(*ast.ExplainStmt); ok {
  1568  		sc.InExplainStmt = true
  1569  		s = explainStmt.Stmt
  1570  	}
  1571  	if _, ok := s.(*ast.ExplainForStmt); ok {
  1572  		sc.InExplainStmt = true
  1573  	}
  1574  	// TODO: Many of the bool variables below are set to the same value.
  1575  	// We should set only two variables
  1576  	// (IgnoreErr and StrictALLEGROSQLMode) instead of duplicating the same bool
  1577  	// settings, and push them down to EinsteinDB as flags.
  1578  	switch stmt := s.(type) {
  1579  	case *ast.UFIDelateStmt:
  1580  		ResetUFIDelateStmtCtx(sc, stmt, vars)
  1581  	case *ast.DeleteStmt:
  1582  		sc.InDeleteStmt = true
  1583  		sc.DupKeyAsWarning = stmt.IgnoreErr
  1584  		sc.BadNullAsWarning = !vars.StrictALLEGROSQLMode || stmt.IgnoreErr
  1585  		sc.TruncateAsWarning = !vars.StrictALLEGROSQLMode || stmt.IgnoreErr
  1586  		sc.DividedByZeroAsWarning = !vars.StrictALLEGROSQLMode || stmt.IgnoreErr
  1587  		sc.AllowInvalidDate = vars.ALLEGROSQLMode.HasAllowInvalidDatesMode()
  1588  		sc.IgnoreZeroInDate = !vars.StrictALLEGROSQLMode || stmt.IgnoreErr || sc.AllowInvalidDate
  1589  		sc.Priority = stmt.Priority
  1590  	case *ast.InsertStmt:
  1591  		sc.InInsertStmt = true
  1592  		// For an INSERT statement (unlike an uFIDelate statement), disabling StrictALLEGROSQLMode
  1593  		// should turn on TruncateAsWarning and DividedByZeroAsWarning,
  1594  		// but should not turn on DupKeyAsWarning or BadNullAsWarning.
  1595  		sc.DupKeyAsWarning = stmt.IgnoreErr
  1596  		sc.BadNullAsWarning = stmt.IgnoreErr
  1597  		sc.TruncateAsWarning = !vars.StrictALLEGROSQLMode || stmt.IgnoreErr
  1598  		sc.DividedByZeroAsWarning = !vars.StrictALLEGROSQLMode || stmt.IgnoreErr
  1599  		sc.AllowInvalidDate = vars.ALLEGROSQLMode.HasAllowInvalidDatesMode()
  1600  		sc.IgnoreZeroInDate = !vars.StrictALLEGROSQLMode || stmt.IgnoreErr || sc.AllowInvalidDate
  1601  		sc.Priority = stmt.Priority
  1602  	case *ast.CreateBlockStmt, *ast.AlterBlockStmt:
  1603  		// Make sure the sql_mode is strict when checking a defCausumn's default value.
  1604  	case *ast.LoadDataStmt:
  1605  		sc.DupKeyAsWarning = true
  1606  		sc.BadNullAsWarning = true
  1607  		sc.TruncateAsWarning = !vars.StrictALLEGROSQLMode
  1608  		sc.InLoadDataStmt = true
  1609  	case *ast.SelectStmt:
  1610  		sc.InSelectStmt = true
  1611  
  1612  		// See https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html#sql-mode-strict,
  1613  		// which says: "For statements such as SELECT that do not change data, invalid values
  1614  		// generate a warning in strict mode, not an error."
  1615  		// See also https://dev.mysql.com/doc/refman/5.7/en/out-of-range-and-overflow.html.
  1616  		sc.OverflowAsWarning = true
  1617  
  1618  		// Return a warning for truncation errors in SELECT.
  1619  		sc.TruncateAsWarning = true
  1620  		sc.IgnoreZeroInDate = true
  1621  		sc.AllowInvalidDate = vars.ALLEGROSQLMode.HasAllowInvalidDatesMode()
  1622  		if opts := stmt.SelectStmtOpts; opts != nil {
  1623  			sc.Priority = opts.Priority
  1624  			sc.NotFillCache = !opts.ALLEGROSQLCache
  1625  		}
  1626  	case *ast.SetOprStmt:
  1627  		sc.InSelectStmt = true
  1628  		sc.OverflowAsWarning = true
  1629  		sc.TruncateAsWarning = true
  1630  		sc.IgnoreZeroInDate = true
  1631  		sc.AllowInvalidDate = vars.ALLEGROSQLMode.HasAllowInvalidDatesMode()
  1632  	case *ast.ShowStmt:
  1633  		sc.IgnoreTruncate = true
  1634  		sc.IgnoreZeroInDate = true
  1635  		sc.AllowInvalidDate = vars.ALLEGROSQLMode.HasAllowInvalidDatesMode()
  1636  		if stmt.Tp == ast.ShowWarnings || stmt.Tp == ast.ShowErrors {
  1637  			sc.InShowWarning = true
  1638  			sc.SetWarnings(vars.StmtCtx.GetWarnings())
  1639  		}
  1640  	case *ast.SplitRegionStmt:
  1641  		sc.IgnoreTruncate = false
  1642  		sc.IgnoreZeroInDate = true
  1643  		sc.AllowInvalidDate = vars.ALLEGROSQLMode.HasAllowInvalidDatesMode()
  1644  	default:
  1645  		sc.IgnoreTruncate = true
  1646  		sc.IgnoreZeroInDate = true
  1647  		sc.AllowInvalidDate = vars.ALLEGROSQLMode.HasAllowInvalidDatesMode()
  1648  	}
  1649  	vars.PreparedParams = vars.PreparedParams[:0]
  1650  	if priority := allegrosql.PriorityEnum(atomic.LoadInt32(&variable.ForcePriority)); priority != allegrosql.NoPriority {
  1651  		sc.Priority = priority
  1652  	}
  1653  	if vars.StmtCtx.LastInsertID > 0 {
  1654  		sc.PrevLastInsertID = vars.StmtCtx.LastInsertID
  1655  	} else {
  1656  		sc.PrevLastInsertID = vars.StmtCtx.PrevLastInsertID
  1657  	}
  1658  	sc.PrevAffectedEvents = 0
  1659  	if vars.StmtCtx.InUFIDelateStmt || vars.StmtCtx.InDeleteStmt || vars.StmtCtx.InInsertStmt {
  1660  		sc.PrevAffectedEvents = int64(vars.StmtCtx.AffectedEvents())
  1661  	} else if vars.StmtCtx.InSelectStmt {
  1662  		sc.PrevAffectedEvents = -1
  1663  	}
  1664  	if globalConfig.EnableDefCauslectInterDircutionInfo {
  1665  		sc.RuntimeStatsDefCausl = execdetails.NewRuntimeStatsDefCausl()
  1666  	}
  1667  
  1668  	sc.TblInfo2UnionScan = make(map[*perceptron.BlockInfo]bool)
  1669  	errCount, warnCount := vars.StmtCtx.NumErrorWarnings()
  1670  	vars.SysErrorCount = errCount
  1671  	vars.SysWarningCount = warnCount
  1672  	vars.StmtCtx = sc
  1673  	vars.PrevFoundInCausetCache = vars.FoundInCausetCache
  1674  	vars.FoundInCausetCache = false
  1675  	return
  1676  }
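
        // Typical call-site sketch (the surrounding driver code is hypothetical and
        // only for illustration):
        //
        //	if err := ResetContextOfStmt(sctx, stmtNode); err != nil {
        //		return err
        //	}
        //	// ... plan and execute stmtNode with the fresh StatementContext ...
        //
        // The reset must happen before planning and execution so that per-statement
        // flags (warning conversion, priority, memory tracking) describe the new
        // statement rather than the previous one.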
  1677  
  1678  // ResetUFIDelateStmtCtx resets the statement context for an UFIDelateStmt.
  1679  func ResetUFIDelateStmtCtx(sc *stmtctx.StatementContext, stmt *ast.UFIDelateStmt, vars *variable.StochastikVars) {
  1680  	sc.InUFIDelateStmt = true
  1681  	sc.DupKeyAsWarning = stmt.IgnoreErr
  1682  	sc.BadNullAsWarning = !vars.StrictALLEGROSQLMode || stmt.IgnoreErr
  1683  	sc.TruncateAsWarning = !vars.StrictALLEGROSQLMode || stmt.IgnoreErr
  1684  	sc.DividedByZeroAsWarning = !vars.StrictALLEGROSQLMode || stmt.IgnoreErr
  1685  	sc.AllowInvalidDate = vars.ALLEGROSQLMode.HasAllowInvalidDatesMode()
  1686  	sc.IgnoreZeroInDate = !vars.StrictALLEGROSQLMode || stmt.IgnoreErr || sc.AllowInvalidDate
  1687  	sc.Priority = stmt.Priority
  1688  }
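
        // Example: with StrictALLEGROSQLMode = true and stmt.IgnoreErr = false,
        // DupKeyAsWarning, BadNullAsWarning, TruncateAsWarning, and
        // DividedByZeroAsWarning all stay false, so those conditions surface as
        // errors; once IgnoreErr is set (the IGNORE keyword), every one of them
        // degrades to a warning instead.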
  1689  
  1690  // FillVirtualDeferredCausetValue calculates virtual defCausumn values by evaluating each generated
  1691  // memex against the rows of a chunk, and then fills the results back into the chunk.
  1692  func FillVirtualDeferredCausetValue(virtualRetTypes []*types.FieldType, virtualDeferredCausetIndex []int,
  1693  	schemaReplicant *memex.Schema, defCausumns []*perceptron.DeferredCausetInfo, sctx stochastikctx.Context, req *chunk.Chunk) error {
  1694  	virDefCauss := chunk.NewChunkWithCapacity(virtualRetTypes, req.Capacity())
  1695  	iter := chunk.NewIterator4Chunk(req)
  1696  	for i, idx := range virtualDeferredCausetIndex {
  1697  		for event := iter.Begin(); event != iter.End(); event = iter.Next() {
  1698  			val, err := schemaReplicant.DeferredCausets[idx].EvalVirtualDeferredCauset(event)
  1699  			if err != nil {
  1700  				return err
  1701  			}
  1702  			// Because the memex might return a type different from
  1703  			// the generated defCausumn's, we wrap a CAST on the result.
  1704  			castCauset, err := causet.CastValue(sctx, val, defCausumns[idx], false, true)
  1705  			if err != nil {
  1706  				return err
  1707  			}
  1708  			virDefCauss.AppendCauset(i, &castCauset)
  1709  		}
  1710  		req.SetDefCaus(idx, virDefCauss.DeferredCauset(i))
  1711  	}
  1712  	return nil
  1713  }
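
        // CastValue above is the package-level causet.CastValue; the evaluated value
        // is kept in a separate local (val) so it does not shadow the imported causet
        // package. A minimal call-site sketch (the input names are hypothetical):
        //
        //	err := FillVirtualDeferredCausetValue(virtualRetTypes, virtualIdx,
        //		schemaReplicant, defCausumnInfos, sctx, chk)
        //	if err != nil {
        //		return err
        //	}
        //
        // Afterwards every defCausumn listed in virtualIdx holds freshly evaluated,
        // casted values for all events in chk.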