github.com/whtcorpsinc/MilevaDB-Prod@v0.0.0-20211104133533-f57f4be3b597/interlock/adapter.go

     1  // Copyright 2020 WHTCORPS INC, Inc.
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // See the License for the specific language governing permissions and
    12  // limitations under the License.
    13  
    14  package interlock
    15  
    16  import (
    17  	"context"
    18  	"fmt"
    19  	"math"
    20  	"strconv"
    21  	"strings"
    22  	"sync/atomic"
    23  	"time"
    24  
    25  	"github.com/opentracing/opentracing-go"
    26  	"github.com/whtcorpsinc/BerolinaSQL/allegrosql"
    27  	"github.com/whtcorpsinc/BerolinaSQL/ast"
    28  	"github.com/whtcorpsinc/BerolinaSQL/perceptron"
    29  	"github.com/whtcorpsinc/BerolinaSQL/terror"
    30  	"github.com/whtcorpsinc/errors"
    31  	"github.com/whtcorpsinc/log"
    32  	"github.com/whtcorpsinc/milevadb/causet"
    33  	causetembedded "github.com/whtcorpsinc/milevadb/causet/embedded"
    34  	"github.com/whtcorpsinc/milevadb/causetstore/einsteindb"
    35  	"github.com/whtcorpsinc/milevadb/config"
    36  	"github.com/whtcorpsinc/milevadb/ekv"
    37  	"github.com/whtcorpsinc/milevadb/memex"
    38  	"github.com/whtcorpsinc/milevadb/metrics"
    39  	"github.com/whtcorpsinc/milevadb/petri"
    40  	"github.com/whtcorpsinc/milevadb/plugin"
    41  	"github.com/whtcorpsinc/milevadb/schemareplicant"
    42  	"github.com/whtcorpsinc/milevadb/soliton/chunk"
    43  	"github.com/whtcorpsinc/milevadb/soliton/execdetails"
    44  	"github.com/whtcorpsinc/milevadb/soliton/logutil"
    45  	"github.com/whtcorpsinc/milevadb/soliton/memory"
    46  	"github.com/whtcorpsinc/milevadb/soliton/plancodec"
    47  	"github.com/whtcorpsinc/milevadb/soliton/sqlexec"
    48  	"github.com/whtcorpsinc/milevadb/soliton/stmtsummary"
    49  	"github.com/whtcorpsinc/milevadb/soliton/stringutil"
    50  	"github.com/whtcorpsinc/milevadb/stochastikctx"
    51  	"github.com/whtcorpsinc/milevadb/stochastikctx/variable"
    52  	"github.com/whtcorpsinc/milevadb/types"
    53  	"go.uber.org/zap"
    54  	"go.uber.org/zap/zapembedded"
    55  )
    56  
    57  // processinfoSetter is the interface used to set the current running process info.
    58  type processinfoSetter interface {
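        	// SetProcessInfo receives the memex text, the start time, the command byte and the max execution time, as passed in InterDirc.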
    59  	SetProcessInfo(string, time.Time, byte, uint64)
    60  }
    61  
    62  // recordSet wraps an interlock and implements the sqlexec.RecordSet interface.
    63  type recordSet struct {
    64  	fields     []*ast.ResultField
    65  	interlock  InterlockingDirectorate
    66  	stmt       *InterDircStmt
    67  	lastErr    error
    68  	txnStartTS uint64
    69  }
    70  
    71  func (a *recordSet) Fields() []*ast.ResultField {
    72  	if len(a.fields) == 0 {
    73  		a.fields = defCausNames2ResultFields(a.interlock.Schema(), a.stmt.OutputNames, a.stmt.Ctx.GetStochastikVars().CurrentDB)
    74  	}
    75  	return a.fields
    76  }
    77  
    78  func defCausNames2ResultFields(schemaReplicant *memex.Schema, names []*types.FieldName, defaultDB string) []*ast.ResultField {
    79  	rfs := make([]*ast.ResultField, 0, schemaReplicant.Len())
    80  	defaultDBCIStr := perceptron.NewCIStr(defaultDB)
    81  	for i := 0; i < schemaReplicant.Len(); i++ {
    82  		dbName := names[i].DBName
    83  		if dbName.L == "" && names[i].TblName.L != "" {
    84  			dbName = defaultDBCIStr
    85  		}
    86  		origDefCausName := names[i].OrigDefCausName
    87  		if origDefCausName.L == "" {
    88  			origDefCausName = names[i].DefCausName
    89  		}
    90  		rf := &ast.ResultField{
    91  			DeferredCauset:       &perceptron.DeferredCausetInfo{Name: origDefCausName, FieldType: *schemaReplicant.DeferredCausets[i].RetType},
    92  			DeferredCausetAsName: names[i].DefCausName,
    93  			Block:                &perceptron.BlockInfo{Name: names[i].OrigTblName},
    94  			BlockAsName:          names[i].TblName,
    95  			DBName:               dbName,
    96  		}
    97  		// This is for compatibility.
    98  		// See issue https://github.com/whtcorpsinc/milevadb/issues/10513 .
    99  		if len(rf.DeferredCausetAsName.O) > allegrosql.MaxAliasIdentifierLen {
   100  			rf.DeferredCausetAsName.O = rf.DeferredCausetAsName.O[:allegrosql.MaxAliasIdentifierLen]
   101  		}
   102  		// Usually the length of O equals the length of L.
   103  		// Add this length check to avoid a panic.
   104  		if len(rf.DeferredCausetAsName.L) > allegrosql.MaxAliasIdentifierLen {
   105  			rf.DeferredCausetAsName.L = rf.DeferredCausetAsName.L[:allegrosql.MaxAliasIdentifierLen]
   106  		}
   107  		rfs = append(rfs, rf)
   108  	}
   109  	return rfs
   110  }
   111  
   112  // Next uses recordSet's interlock to get the next available chunk for later usage.
   113  // If the chunk does not contain any rows, we update the last-query-found rows in the stochastik variable to the current found rows.
   114  // The reason we need the update is that a chunk with 0 rows indicates the current query is already finished, so we must
   115  // prepare for the next query.
   116  // If stmt is not nil and the chunk contains some rows, we simply increase the last-query-found rows by the number of events in the chunk.
   117  func (a *recordSet) Next(ctx context.Context, req *chunk.Chunk) (err error) {
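        	// Turn a panic raised while fetching the chunk into an error returned to the client, and log the stack.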
   118  	defer func() {
   119  		r := recover()
   120  		if r == nil {
   121  			return
   122  		}
   123  		err = errors.Errorf("%v", r)
   124  		logutil.Logger(ctx).Error("execute allegrosql panic", zap.String("allegrosql", a.stmt.GetTextToLog()), zap.Stack("stack"))
   125  	}()
   126  
   127  	err = Next(ctx, a.interlock, req)
   128  	if err != nil {
   129  		a.lastErr = err
   130  		return err
   131  	}
   132  	numEvents := req.NumEvents()
   133  	if numEvents == 0 {
   134  		if a.stmt != nil {
   135  			a.stmt.Ctx.GetStochastikVars().LastFoundEvents = a.stmt.Ctx.GetStochastikVars().StmtCtx.FoundEvents()
   136  		}
   137  		return nil
   138  	}
   139  	if a.stmt != nil {
   140  		a.stmt.Ctx.GetStochastikVars().StmtCtx.AddFoundEvents(uint64(numEvents))
   141  	}
   142  	return nil
   143  }
   144  
   145  // NewChunk creates a chunk based on the top-level interlock's newFirstChunk().
   146  func (a *recordSet) NewChunk() *chunk.Chunk {
   147  	return newFirstChunk(a.interlock)
   148  }
   149  
   150  func (a *recordSet) Close() error {
   151  	err := a.interlock.Close()
   152  	a.stmt.CloseRecordSet(a.txnStartTS, a.lastErr)
   153  	return err
   154  }
   155  
   156  // OnFetchReturned implements commandLifeCycle#OnFetchReturned
   157  func (a *recordSet) OnFetchReturned() {
   158  	a.stmt.LogSlowQuery(a.txnStartTS, a.lastErr == nil, true)
   159  }
   160  
   161  // InterDircStmt implements the sqlexec.Statement interface; it wraps a causet.Causet into an sqlexec.Statement.
   162  type InterDircStmt struct {
   163  	// GoCtx stores parent go context.Context for a stmt.
   164  	GoCtx context.Context
   165  	// SchemaReplicant stores a reference to the schemaReplicant information.
   166  	SchemaReplicant schemareplicant.SchemaReplicant
   167  	// Causet stores a reference to the final physical plan.
   168  	Causet causetembedded.Causet
   169  	// Text represents the origin query text.
   170  	Text string
   171  
   172  	StmtNode ast.StmtNode
   173  
   174  	Ctx stochastikctx.Context
   175  
   176  	// LowerPriority represents whether to lower the execution priority of a query.
   177  	LowerPriority        bool
   178  	isPreparedStmt       bool
   179  	isSelectForUFIDelate bool
   180  	retryCount           uint
   181  	retryStartTime       time.Time
   182  
   183  	// OutputNames will be set when using a cached plan.
   184  	OutputNames []*types.FieldName
   185  	PsStmt      *causetembedded.CachedPrepareStmt
   186  }
   187  
   188  // PointGet is a short path that executes a point get directly from the plan, keeping only the necessary steps.
   189  func (a *InterDircStmt) PointGet(ctx context.Context, is schemareplicant.SchemaReplicant) (*recordSet, error) {
   190  	if span := opentracing.SpanFromContext(ctx); span != nil && span.Tracer() != nil {
   191  		span1 := span.Tracer().StartSpan("InterDircStmt.PointGet", opentracing.ChildOf(span.Context()))
   192  		span1.LogKV("allegrosql", a.OriginText())
   193  		defer span1.Finish()
   194  		ctx = opentracing.ContextWithSpan(ctx, span1)
   195  	}
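        	// A start TS of math.MaxUint64 is treated as "read the latest committed version", so the point get
        	// does not have to fetch a fresh timestamp first (see also the useMaxTS path in buildInterlockingDirectorate).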
   196  	startTs := uint64(math.MaxUint64)
   197  	err := a.Ctx.InitTxnWithStartTS(startTs)
   198  	if err != nil {
   199  		return nil, err
   200  	}
   201  	a.Ctx.GetStochastikVars().StmtCtx.Priority = ekv.PriorityHigh
   202  
   203  	// try to reuse point get interlock
   204  	if a.PsStmt.InterlockingDirectorate != nil {
   205  		exec, ok := a.PsStmt.InterlockingDirectorate.(*PointGetInterlockingDirectorate)
   206  		if !ok {
   207  			logutil.Logger(ctx).Error("invalid interlock type, not PointGetInterlockingDirectorate for point get path")
   208  			a.PsStmt.InterlockingDirectorate = nil
   209  		} else {
   210  			// CachedCauset type is already checked in last step
   211  			pointGetCauset := a.PsStmt.PreparedAst.CachedCauset.(*causetembedded.PointGetCauset)
   212  			exec.Init(pointGetCauset, startTs)
   213  			a.PsStmt.InterlockingDirectorate = exec
   214  		}
   215  	}
   216  	if a.PsStmt.InterlockingDirectorate == nil {
   217  		b := newInterlockingDirectorateBuilder(a.Ctx, is)
   218  		newInterlockingDirectorate := b.build(a.Causet)
   219  		if b.err != nil {
   220  			return nil, b.err
   221  		}
   222  		a.PsStmt.InterlockingDirectorate = newInterlockingDirectorate
   223  	}
   224  	pointInterlockingDirectorate := a.PsStmt.InterlockingDirectorate.(*PointGetInterlockingDirectorate)
   225  	if err = pointInterlockingDirectorate.Open(ctx); err != nil {
   226  		terror.Call(pointInterlockingDirectorate.Close)
   227  		return nil, err
   228  	}
   229  	return &recordSet{
   230  		interlock:  pointInterlockingDirectorate,
   231  		stmt:       a,
   232  		txnStartTS: startTs,
   233  	}, nil
   234  }
   235  
   236  // OriginText returns the original memex as a string.
   237  func (a *InterDircStmt) OriginText() string {
   238  	return a.Text
   239  }
   240  
   241  // IsPrepared returns true if stmt is a prepare memex.
   242  func (a *InterDircStmt) IsPrepared() bool {
   243  	return a.isPreparedStmt
   244  }
   245  
   246  // IsReadOnly returns true if a memex is read only.
   247  // If the current StmtNode is an InterDircuteStmt, we can get its prepared stmt
   248  // and then use the ast.IsReadOnly function to determine whether the memex is read only.
   249  func (a *InterDircStmt) IsReadOnly(vars *variable.StochastikVars) bool {
   250  	return causet.IsReadOnly(a.StmtNode, vars)
   251  }
   252  
   253  // RebuildCauset rebuilds the plan of the currently executing memex.
   254  // It returns the current information schemaReplicant version that 'a' is using.
   255  func (a *InterDircStmt) RebuildCauset(ctx context.Context) (int64, error) {
   256  	is := schemareplicant.GetSchemaReplicant(a.Ctx)
   257  	a.SchemaReplicant = is
   258  	if err := causetembedded.Preprocess(a.Ctx, a.StmtNode, is, causetembedded.InTxnRetry); err != nil {
   259  		return 0, err
   260  	}
   261  	p, names, err := causet.Optimize(ctx, a.Ctx, a.StmtNode, is)
   262  	if err != nil {
   263  		return 0, err
   264  	}
   265  	a.OutputNames = names
   266  	a.Causet = p
   267  	return is.SchemaMetaVersion(), nil
   268  }
   269  
   270  // InterDirc builds an InterlockingDirectorate from a plan. If the InterlockingDirectorate doesn't return a result,
   271  // like the INSERT and UPDATE memexs, it executes inside this function. If the InterlockingDirectorate returns
   272  // a result, execution happens after this function returns, in the Next method of the returned sqlexec.RecordSet.
   273  func (a *InterDircStmt) InterDirc(ctx context.Context) (_ sqlexec.RecordSet, err error) {
   274  	defer func() {
   275  		r := recover()
   276  		if r == nil {
   277  			if a.retryCount > 0 {
   278  				metrics.StatementPessimisticRetryCount.Observe(float64(a.retryCount))
   279  			}
   280  			lockKeysCnt := a.Ctx.GetStochastikVars().StmtCtx.LockKeysCount
   281  			if lockKeysCnt > 0 {
   282  				metrics.StatementLockKeysCount.Observe(float64(lockKeysCnt))
   283  			}
   284  			return
   285  		}
   286  		if str, ok := r.(string); !ok || !strings.HasPrefix(str, memory.PanicMemoryExceed) {
   287  			panic(r)
   288  		}
   289  		err = errors.Errorf("%v", r)
   290  		logutil.Logger(ctx).Error("execute allegrosql panic", zap.String("allegrosql", a.GetTextToLog()), zap.Stack("stack"))
   291  	}()
   292  
   293  	sctx := a.Ctx
   294  	ctx = stochastikctx.SetCommitCtx(ctx, sctx)
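        	// For an internal (restricted) ANALYZE, temporarily force single-threaded stats building and scanning
        	// and READ COMMITTED isolation; the deferred function below restores the original settings.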
   295  	if _, ok := a.Causet.(*causetembedded.Analyze); ok && sctx.GetStochastikVars().InRestrictedALLEGROSQL {
   296  		oriStats, _ := sctx.GetStochastikVars().GetSystemVar(variable.MilevaDBBuildStatsConcurrency)
   297  		oriScan := sctx.GetStochastikVars().DistALLEGROSQLScanConcurrency()
   298  		oriIndex := sctx.GetStochastikVars().IndexSerialScanConcurrency()
   299  		oriIso, _ := sctx.GetStochastikVars().GetSystemVar(variable.TxnIsolation)
   300  		terror.Log(sctx.GetStochastikVars().SetSystemVar(variable.MilevaDBBuildStatsConcurrency, "1"))
   301  		sctx.GetStochastikVars().SetDistALLEGROSQLScanConcurrency(1)
   302  		sctx.GetStochastikVars().SetIndexSerialScanConcurrency(1)
   303  		terror.Log(sctx.GetStochastikVars().SetSystemVar(variable.TxnIsolation, ast.ReadCommitted))
   304  		defer func() {
   305  			terror.Log(sctx.GetStochastikVars().SetSystemVar(variable.MilevaDBBuildStatsConcurrency, oriStats))
   306  			sctx.GetStochastikVars().SetDistALLEGROSQLScanConcurrency(oriScan)
   307  			sctx.GetStochastikVars().SetIndexSerialScanConcurrency(oriIndex)
   308  			terror.Log(sctx.GetStochastikVars().SetSystemVar(variable.TxnIsolation, oriIso))
   309  		}()
   310  	}
   311  
   312  	if sctx.GetStochastikVars().StmtCtx.HasMemQuotaHint {
   313  		sctx.GetStochastikVars().StmtCtx.MemTracker.SetBytesLimit(sctx.GetStochastikVars().StmtCtx.MemQuotaQuery)
   314  	}
   315  
   316  	e, err := a.buildInterlockingDirectorate()
   317  	if err != nil {
   318  		return nil, err
   319  	}
   320  
   321  	if err = e.Open(ctx); err != nil {
   322  		terror.Call(e.Close)
   323  		return nil, err
   324  	}
   325  
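        	// CommandValue is stored as a uint32 so it can be loaded atomically; only its low byte is the
        	// command code that gets passed to SetProcessInfo.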
   326  	cmd32 := atomic.LoadUint32(&sctx.GetStochastikVars().CommandValue)
   327  	cmd := byte(cmd32)
   328  	var pi processinfoSetter
   329  	if raw, ok := sctx.(processinfoSetter); ok {
   330  		pi = raw
   331  		allegrosql := a.OriginText()
   332  		if simple, ok := a.Causet.(*causetembedded.Simple); ok && simple.Statement != nil {
   333  			if ss, ok := simple.Statement.(ast.SensitiveStmtNode); ok {
   334  				// Use SecureText to avoid leaking password information.
   335  				allegrosql = ss.SecureText()
   336  			}
   337  		}
   338  		maxInterDircutionTime := getMaxInterDircutionTime(sctx)
   339  		// Update the process info; ShowProcess() will use it.
   340  		pi.SetProcessInfo(allegrosql, time.Now(), cmd, maxInterDircutionTime)
   341  		if a.Ctx.GetStochastikVars().StmtCtx.StmtType == "" {
   342  			a.Ctx.GetStochastikVars().StmtCtx.StmtType = GetStmtLabel(a.StmtNode)
   343  		}
   344  	}
   345  
   346  	isPessimistic := sctx.GetStochastikVars().TxnCtx.IsPessimistic
   347  
   348  	// Special handling for the "select for update" memex in a pessimistic transaction.
   349  	if isPessimistic && a.isSelectForUFIDelate {
   350  		return a.handlePessimisticSelectForUFIDelate(ctx, e)
   351  	}
   352  
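        	// A memex that produces no result set (e.g. INSERT or DO) is executed to completion inside
        	// handleNoDelay; only result-returning memexs fall through and are wrapped in a recordSet below.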
   353  	if handled, result, err := a.handleNoDelay(ctx, e, isPessimistic); handled {
   354  		return result, err
   355  	}
   356  
   357  	var txnStartTS uint64
   358  	txn, err := sctx.Txn(false)
   359  	if err != nil {
   360  		return nil, err
   361  	}
   362  	if txn.Valid() {
   363  		txnStartTS = txn.StartTS()
   364  	}
   365  	return &recordSet{
   366  		interlock:  e,
   367  		stmt:       a,
   368  		txnStartTS: txnStartTS,
   369  	}, nil
   370  }
   371  
   372  func (a *InterDircStmt) handleNoDelay(ctx context.Context, e InterlockingDirectorate, isPessimistic bool) (handled bool, rs sqlexec.RecordSet, err error) {
   373  	sc := a.Ctx.GetStochastikVars().StmtCtx
   374  	defer func() {
   375  		// If the stmt has no rs, like `insert`, the stochastik tracker detachment is done directly
   376  		// in this `defer` function. If the rs is not nil, the detachment is done in
   377  		// `rs.Close` in `handleStmt`.
   378  		if sc != nil && rs == nil {
   379  			if sc.MemTracker != nil {
   380  				sc.MemTracker.DetachFromGlobalTracker()
   381  			}
   382  			if sc.DiskTracker != nil {
   383  				sc.DiskTracker.DetachFromGlobalTracker()
   384  			}
   385  		}
   386  	}()
   387  
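        	// For EXPLAIN ANALYZE, the schema of the analyzed interlock (not the EXPLAIN wrapper) decides
        	// whether the memex returns rows to the client.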
   388  	toCheck := e
   389  	if explain, ok := e.(*ExplainInterDirc); ok {
   390  		if explain.analyzeInterDirc != nil {
   391  			toCheck = explain.analyzeInterDirc
   392  		}
   393  	}
   394  
   395  	// If the interlock doesn't return any result to the client, we execute it without delay.
   396  	if toCheck.Schema().Len() == 0 {
   397  		if isPessimistic {
   398  			return true, nil, a.handlePessimisticDML(ctx, e)
   399  		}
   400  		r, err := a.handleNoDelayInterlockingDirectorate(ctx, e)
   401  		return true, r, err
   402  	} else if proj, ok := toCheck.(*ProjectionInterDirc); ok && proj.calculateNoDelay {
   403  		// Currently this is only for the "DO" memex. Take "DO 1, @a=2;" as an example:
   404  		// the Projection has two memexs and two columns in the schemaReplicant, but we should
   405  		// not return the result of the two memexs.
   406  		r, err := a.handleNoDelayInterlockingDirectorate(ctx, e)
   407  		return true, r, err
   408  	}
   409  
   410  	return false, nil, nil
   411  }
   412  
   413  // getMaxInterDircutionTime gets the max execution timeout value.
   414  func getMaxInterDircutionTime(sctx stochastikctx.Context) uint64 {
   415  	if sctx.GetStochastikVars().StmtCtx.HasMaxInterDircutionTime {
   416  		return sctx.GetStochastikVars().StmtCtx.MaxInterDircutionTime
   417  	}
   418  	return sctx.GetStochastikVars().MaxInterDircutionTime
   419  }
   420  
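        // chunkEventRecordSet is a RecordSet backed by rows that have already been fetched; it is used by
        // the pessimistic "select for update" path to replay the buffered result to the client.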
   421  type chunkEventRecordSet struct {
   422  	rows     []chunk.Event
   423  	idx      int
   424  	fields   []*ast.ResultField
   425  	e        InterlockingDirectorate
   426  	execStmt *InterDircStmt
   427  }
   428  
   429  func (c *chunkEventRecordSet) Fields() []*ast.ResultField {
   430  	return c.fields
   431  }
   432  
   433  func (c *chunkEventRecordSet) Next(ctx context.Context, chk *chunk.Chunk) error {
   434  	chk.Reset()
   435  	for !chk.IsFull() && c.idx < len(c.rows) {
   436  		chk.AppendEvent(c.rows[c.idx])
   437  		c.idx++
   438  	}
   439  	return nil
   440  }
   441  
   442  func (c *chunkEventRecordSet) NewChunk() *chunk.Chunk {
   443  	return newFirstChunk(c.e)
   444  }
   445  
   446  func (c *chunkEventRecordSet) Close() error {
   447  	c.execStmt.CloseRecordSet(c.execStmt.Ctx.GetStochastikVars().TxnCtx.StartTS, nil)
   448  	return nil
   449  }
   450  
   451  func (a *InterDircStmt) handlePessimisticSelectForUFIDelate(ctx context.Context, e InterlockingDirectorate) (sqlexec.RecordSet, error) {
   452  	for {
   453  		rs, err := a.runPessimisticSelectForUFIDelate(ctx, e)
   454  		e, err = a.handlePessimisticLockError(ctx, err)
   455  		if err != nil {
   456  			return nil, err
   457  		}
   458  		if e == nil {
   459  			return rs, nil
   460  		}
   461  	}
   462  }
   463  
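        // runPessimisticSelectForUFIDelate drains the interlock completely and buffers every event, so that
        // any lock or write-conflict error surfaces here and can be retried before a record set is returned.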
   464  func (a *InterDircStmt) runPessimisticSelectForUFIDelate(ctx context.Context, e InterlockingDirectorate) (sqlexec.RecordSet, error) {
   465  	defer func() {
   466  		terror.Log(e.Close())
   467  	}()
   468  	var rows []chunk.Event
   469  	var err error
   470  	req := newFirstChunk(e)
   471  	for {
   472  		err = Next(ctx, e, req)
   473  		if err != nil {
   474  			// Handle 'write conflict' error.
   475  			break
   476  		}
   477  		if req.NumEvents() == 0 {
   478  			fields := defCausNames2ResultFields(e.Schema(), a.OutputNames, a.Ctx.GetStochastikVars().CurrentDB)
   479  			return &chunkEventRecordSet{rows: rows, fields: fields, e: e, execStmt: a}, nil
   480  		}
   481  		iter := chunk.NewIterator4Chunk(req)
   482  		for r := iter.Begin(); r != iter.End(); r = iter.Next() {
   483  			rows = append(rows, r)
   484  		}
   485  		req = chunk.Renew(req, a.Ctx.GetStochastikVars().MaxChunkSize)
   486  	}
   487  	return nil, err
   488  }
   489  
   490  func (a *InterDircStmt) handleNoDelayInterlockingDirectorate(ctx context.Context, e InterlockingDirectorate) (sqlexec.RecordSet, error) {
   491  	sctx := a.Ctx
   492  	if span := opentracing.SpanFromContext(ctx); span != nil && span.Tracer() != nil {
   493  		span1 := span.Tracer().StartSpan("interlock.handleNoDelayInterlockingDirectorate", opentracing.ChildOf(span.Context()))
   494  		defer span1.Finish()
   495  		ctx = opentracing.ContextWithSpan(ctx, span1)
   496  	}
   497  
   498  	// Check if "milevadb_snapshot" is set for the write interlocks.
   499  	// In history read mode, we cannot perform write operations.
   500  	switch e.(type) {
   501  	case *DeleteInterDirc, *InsertInterDirc, *UFIDelateInterDirc, *ReplaceInterDirc, *LoadDataInterDirc, *DBSInterDirc:
   502  		snapshotTS := sctx.GetStochastikVars().SnapshotTS
   503  		if snapshotTS != 0 {
   504  			return nil, errors.New("can not execute write memex when 'milevadb_snapshot' is set")
   505  		}
   506  		lowResolutionTSO := sctx.GetStochastikVars().LowResolutionTSO
   507  		if lowResolutionTSO {
   508  			return nil, errors.New("can not execute write memex when 'milevadb_low_resolution_tso' is set")
   509  		}
   510  	}
   511  
   512  	var err error
   513  	defer func() {
   514  		terror.Log(e.Close())
   515  		a.logAudit()
   516  	}()
   517  
   518  	err = Next(ctx, e, newFirstChunk(e))
   519  	if err != nil {
   520  		return nil, err
   521  	}
   522  	return nil, err
   523  }
   524  
   525  func (a *InterDircStmt) handlePessimisticDML(ctx context.Context, e InterlockingDirectorate) error {
   526  	sctx := a.Ctx
   527  	// Do not activate the transaction here.
   528  	// When autocommit = 0 and the transaction is in pessimistic mode,
   529  	// memexs like `set xxx = xxx;` should not activate the transaction.
   530  	txn, err := sctx.Txn(false)
   531  	if err != nil {
   532  		return err
   533  	}
   534  	txnCtx := sctx.GetStochastikVars().TxnCtx
   535  	for {
   536  		startPointGetLocking := time.Now()
   537  		_, err = a.handleNoDelayInterlockingDirectorate(ctx, e)
   538  		if !txn.Valid() {
   539  			return err
   540  		}
   541  		if err != nil {
   542  			// It is possible the DML has a point get plan that locks the key.
   543  			e, err = a.handlePessimisticLockError(ctx, err)
   544  			if err != nil {
   545  				if ErrDeadlock.Equal(err) {
   546  					metrics.StatementDeadlockDetectDuration.Observe(time.Since(startPointGetLocking).Seconds())
   547  				}
   548  				return err
   549  			}
   550  			continue
   551  		}
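        		// The DML succeeded; gather the keys this memex needs to lock (including the keys of
        		// unchanged events) and acquire pessimistic locks on them, merging the lock-key stats into the memex context.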
   552  		keys, err1 := txn.(pessimisticTxn).KeysNeedToLock()
   553  		if err1 != nil {
   554  			return err1
   555  		}
   556  		keys = txnCtx.DefCauslectUnchangedEventKeys(keys)
   557  		if len(keys) == 0 {
   558  			return nil
   559  		}
   560  		seVars := sctx.GetStochastikVars()
   561  		lockCtx := newLockCtx(seVars, seVars.LockWaitTimeout)
   562  		var lockKeyStats *execdetails.LockKeysDetails
   563  		ctx = context.WithValue(ctx, execdetails.LockKeysDetailCtxKey, &lockKeyStats)
   564  		startLocking := time.Now()
   565  		err = txn.LockKeys(ctx, lockCtx, keys...)
   566  		if lockKeyStats != nil {
   567  			seVars.StmtCtx.MergeLockKeysInterDircDetails(lockKeyStats)
   568  		}
   569  		if err == nil {
   570  			return nil
   571  		}
   572  		e, err = a.handlePessimisticLockError(ctx, err)
   573  		if err != nil {
   574  			if ErrDeadlock.Equal(err) {
   575  				metrics.StatementDeadlockDetectDuration.Observe(time.Since(startLocking).Seconds())
   576  			}
   577  			return err
   578  		}
   579  	}
   580  }
   581  
   582  // UFIDelateForUFIDelateTS updates the ForUFIDelateTS; if newForUFIDelateTS is 0, it obtains a new TS from FIDel.
   583  func UFIDelateForUFIDelateTS(seCtx stochastikctx.Context, newForUFIDelateTS uint64) error {
   584  	txn, err := seCtx.Txn(false)
   585  	if err != nil {
   586  		return err
   587  	}
   588  	if !txn.Valid() {
   589  		return errors.Trace(ekv.ErrInvalidTxn)
   590  	}
   591  	if newForUFIDelateTS == 0 {
   592  		version, err := seCtx.GetStore().CurrentVersion()
   593  		if err != nil {
   594  			return err
   595  		}
   596  		newForUFIDelateTS = version.Ver
   597  	}
   598  	seCtx.GetStochastikVars().TxnCtx.SetForUFIDelateTS(newForUFIDelateTS)
   599  	txn.SetOption(ekv.SnapshotTS, seCtx.GetStochastikVars().TxnCtx.GetForUFIDelateTS())
   600  	return nil
   601  }
   602  
   603  // handlePessimisticLockError updates the TS and rebuilds the interlock if the err is a write conflict.
   604  func (a *InterDircStmt) handlePessimisticLockError(ctx context.Context, err error) (InterlockingDirectorate, error) {
   605  	txnCtx := a.Ctx.GetStochastikVars().TxnCtx
   606  	var newForUFIDelateTS uint64
   607  	if deadlock, ok := errors.Cause(err).(*einsteindb.ErrDeadlock); ok {
   608  		if !deadlock.IsRetryable {
   609  			return nil, ErrDeadlock
   610  		}
   611  		logutil.Logger(ctx).Info("single memex deadlock, retry memex",
   612  			zap.Uint64("txn", txnCtx.StartTS),
   613  			zap.Uint64("lockTS", deadlock.LockTs),
   614  			zap.Stringer("lockKey", ekv.Key(deadlock.LockKey)),
   615  			zap.Uint64("deadlockKeyHash", deadlock.DeadlockKeyHash))
   616  	} else if terror.ErrorEqual(ekv.ErrWriteConflict, err) {
   617  		errStr := err.Error()
   618  		forUFIDelateTS := txnCtx.GetForUFIDelateTS()
   619  		logutil.Logger(ctx).Debug("pessimistic write conflict, retry memex",
   620  			zap.Uint64("txn", txnCtx.StartTS),
   621  			zap.Uint64("forUFIDelateTS", forUFIDelateTS),
   622  			zap.String("err", errStr))
   623  		// Always update forUFIDelateTS by getting a new timestamp from FIDel.
   624  		// If we use the conflict commitTS as the new forUFIDelateTS and async commit
   625  		// is used, the commitTS of this transaction may exceed the max timestamp
   626  		// that FIDel allocates. Then, the change may be invisible to a new transaction,
   627  		// which means linearizability is broken.
   628  	} else {
   629  		// In this branch err is not nil; always update forUFIDelateTS to avoid the problem described below.
   630  		// For NOWAIT, when ErrLock happens, ErrLockAcquireFailAndNoWaitSet is returned, and within the same txn
   631  		// the forUFIDelateTS of the select must be updated, otherwise a rollback problem may occur:
   632  		// begin;  select for update key1 (ErrLocked or another error, e.g. max_execution_time, is raised here;
   633  		//         the lock on key1 is not acquired and an async rollback of key1 is triggered)
   634  		//         select for update key1 again (this time the lock succeeds, maybe because it was released by others)
   635  		//         but the async rollback rolls back the lock just acquired
   636  		if err != nil {
   637  			tsErr := UFIDelateForUFIDelateTS(a.Ctx, 0)
   638  			if tsErr != nil {
   639  				logutil.Logger(ctx).Warn("UFIDelateForUFIDelateTS failed", zap.Error(tsErr))
   640  			}
   641  		}
   642  		return nil, err
   643  	}
   644  	if a.retryCount >= config.GetGlobalConfig().PessimisticTxn.MaxRetryCount {
   645  		return nil, errors.New("pessimistic dagger retry limit reached")
   646  	}
   647  	a.retryCount++
   648  	a.retryStartTime = time.Now()
   649  	err = UFIDelateForUFIDelateTS(a.Ctx, newForUFIDelateTS)
   650  	if err != nil {
   651  		return nil, err
   652  	}
   653  	e, err := a.buildInterlockingDirectorate()
   654  	if err != nil {
   655  		return nil, err
   656  	}
   657  	// Roll back the memex changes before retrying it.
   658  	a.Ctx.StmtRollback()
   659  	a.Ctx.GetStochastikVars().StmtCtx.ResetForRetry()
   660  
   661  	if err = e.Open(ctx); err != nil {
   662  		return nil, err
   663  	}
   664  	return e, nil
   665  }
   666  
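        // extractConflictCommitTS parses the "conflictCommitTS=" field out of a write-conflict error message,
        // returning 0 if the field is missing or malformed.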
   667  func extractConflictCommitTS(errStr string) uint64 {
   668  	strs := strings.Split(errStr, "conflictCommitTS=")
   669  	if len(strs) != 2 {
   670  		return 0
   671  	}
   672  	tsPart := strs[1]
   673  	length := strings.IndexByte(tsPart, ',')
   674  	if length < 0 {
   675  		return 0
   676  	}
   677  	tsStr := tsPart[:length]
   678  	ts, err := strconv.ParseUint(tsStr, 10, 64)
   679  	if err != nil {
   680  		return 0
   681  	}
   682  	return ts
   683  }
   684  
   685  type pessimisticTxn interface {
   686  	ekv.Transaction
   687  	// KeysNeedToLock returns the keys that need to be locked.
   688  	KeysNeedToLock() ([]ekv.Key, error)
   689  }
   690  
   691  // buildInterlockingDirectorate builds an interlock from the plan; a prepared memex may need an additional procedure.
   692  func (a *InterDircStmt) buildInterlockingDirectorate() (InterlockingDirectorate, error) {
   693  	ctx := a.Ctx
   694  	stmtCtx := ctx.GetStochastikVars().StmtCtx
   695  	if _, ok := a.Causet.(*causetembedded.InterDircute); !ok {
   696  		// Do not sync transaction for InterDircute memex, because the real optimization work is done in
   697  		// "InterDircuteInterDirc.Build".
   698  		useMaxTS, err := causetembedded.IsPointGetWithPKOrUniqueKeyByAutoCommit(ctx, a.Causet)
   699  		if err != nil {
   700  			return nil, err
   701  		}
   702  		if useMaxTS {
   703  			logutil.BgLogger().Debug("init txnStartTS with MaxUint64", zap.Uint64("conn", ctx.GetStochastikVars().ConnectionID), zap.String("text", a.Text))
   704  			err = ctx.InitTxnWithStartTS(math.MaxUint64)
   705  		} else if ctx.GetStochastikVars().SnapshotTS != 0 {
   706  			if _, ok := a.Causet.(*causetembedded.CheckBlock); ok {
   707  				err = ctx.InitTxnWithStartTS(ctx.GetStochastikVars().SnapshotTS)
   708  			}
   709  		}
   710  		if err != nil {
   711  			return nil, err
   712  		}
   713  
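        		// Only fill in a priority when the memex did not already carry one: point gets that can use
        		// MaxUint64 run at high priority, while memexs flagged LowerPriority are demoted.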
   714  		if stmtPri := stmtCtx.Priority; stmtPri == allegrosql.NoPriority {
   715  			switch {
   716  			case useMaxTS:
   717  				stmtCtx.Priority = ekv.PriorityHigh
   718  			case a.LowerPriority:
   719  				stmtCtx.Priority = ekv.PriorityLow
   720  			}
   721  		}
   722  	}
   723  	if _, ok := a.Causet.(*causetembedded.Analyze); ok && ctx.GetStochastikVars().InRestrictedALLEGROSQL {
   724  		ctx.GetStochastikVars().StmtCtx.Priority = ekv.PriorityLow
   725  	}
   726  
   727  	b := newInterlockingDirectorateBuilder(ctx, a.SchemaReplicant)
   728  	e := b.build(a.Causet)
   729  	if b.err != nil {
   730  		return nil, errors.Trace(b.err)
   731  	}
   732  
   733  	// InterDircuteInterDirc is not a real InterlockingDirectorate; we only use it to build another InterlockingDirectorate from a prepared memex.
   734  	if interlockInterDirc, ok := e.(*InterDircuteInterDirc); ok {
   735  		err := interlockInterDirc.Build(b)
   736  		if err != nil {
   737  			return nil, err
   738  		}
   739  		a.Ctx.SetValue(stochastikctx.QueryString, interlockInterDirc.stmt.Text())
   740  		a.OutputNames = interlockInterDirc.outputNames
   741  		a.isPreparedStmt = true
   742  		a.Causet = interlockInterDirc.plan
   743  		if interlockInterDirc.lowerPriority {
   744  			ctx.GetStochastikVars().StmtCtx.Priority = ekv.PriorityLow
   745  		}
   746  		e = interlockInterDirc.stmtInterDirc
   747  	}
   748  	a.isSelectForUFIDelate = b.hasLock && (!stmtCtx.InDeleteStmt && !stmtCtx.InUFIDelateStmt)
   749  	return e, nil
   750  }
   751  
   752  // QueryReplacer replaces newlines and tabs with spaces so that grep results containing the query string stay on one line.
   753  var QueryReplacer = strings.NewReplacer("\r", " ", "\n", " ", "\t", " ")
   754  
   755  func (a *InterDircStmt) logAudit() {
   756  	sessVars := a.Ctx.GetStochastikVars()
   757  	if sessVars.InRestrictedALLEGROSQL {
   758  		return
   759  	}
   760  	err := plugin.ForeachPlugin(plugin.Audit, func(p *plugin.Plugin) error {
   761  		audit := plugin.DeclareAuditManifest(p.Manifest)
   762  		if audit.OnGeneralEvent != nil {
   763  			cmd := allegrosql.Command2Str[byte(atomic.LoadUint32(&a.Ctx.GetStochastikVars().CommandValue))]
   764  			ctx := context.WithValue(context.Background(), plugin.InterDircStartTimeCtxKey, a.Ctx.GetStochastikVars().StartTime)
   765  			audit.OnGeneralEvent(ctx, sessVars, plugin.Log, cmd)
   766  		}
   767  		return nil
   768  	})
   769  	if err != nil {
   770  		log.Error("log audit log failure", zap.Error(err))
   771  	}
   772  }
   773  
   774  // FormatALLEGROSQL is used to format the original ALLEGROSQL, e.g. truncating a long ALLEGROSQL and appending the prepared arguments.
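        // For illustration only (values are hypothetical): with Log.QueryLogMaxLen = 64, a 10240-byte query is
        // rendered roughly as `"SELECT * FROM t WHERE ..."(len:10240)`, and any newlines or tabs are replaced by spaces.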
   775  func FormatALLEGROSQL(allegrosql string, pps variable.PreparedParams) stringutil.StringerFunc {
   776  	return func() string {
   777  		cfg := config.GetGlobalConfig()
   778  		length := len(allegrosql)
   779  		if maxQueryLen := atomic.LoadUint64(&cfg.Log.QueryLogMaxLen); uint64(length) > maxQueryLen {
   780  			allegrosql = fmt.Sprintf("%.*q(len:%d)", maxQueryLen, allegrosql, length)
   781  		}
   782  		return QueryReplacer.Replace(allegrosql) + pps.String()
   783  	}
   784  }
   785  
   786  var (
   787  	stochastikInterDircuteRunDurationInternal = metrics.StochastikInterDircuteRunDuration.WithLabelValues(metrics.LblInternal)
   788  	stochastikInterDircuteRunDurationGeneral  = metrics.StochastikInterDircuteRunDuration.WithLabelValues(metrics.LblGeneral)
   789  )
   790  
   791  // FinishInterDircuteStmt is used to record some information after `InterDircStmt` execution has finished:
   792  // 1. record the slow log if needed.
   793  // 2. record the memex summary.
   794  // 3. record the execute-duration metric.
   795  // 4. update `PrevStmt` in the stochastik variables.
   796  func (a *InterDircStmt) FinishInterDircuteStmt(txnTS uint64, succ bool, hasMoreResults bool) {
   797  	sessVars := a.Ctx.GetStochastikVars()
   798  	execDetail := sessVars.StmtCtx.GetInterDircDetails()
   799  	// Attach commit/lockKeys runtime stats to interlock runtime stats.
   800  	if (execDetail.CommitDetail != nil || execDetail.LockKeysDetail != nil) && sessVars.StmtCtx.RuntimeStatsDefCausl != nil {
   801  		statsWithCommit := &execdetails.RuntimeStatsWithCommit{
   802  			Commit:   execDetail.CommitDetail,
   803  			LockKeys: execDetail.LockKeysDetail,
   804  		}
   805  		sessVars.StmtCtx.RuntimeStatsDefCausl.RegisterStats(a.Causet.ID(), statsWithCommit)
   806  	}
   807  	// `LogSlowQuery` and `SummaryStmt` must be called before recording `PrevStmt`.
   808  	a.LogSlowQuery(txnTS, succ, hasMoreResults)
   809  	a.SummaryStmt(succ)
   810  	prevStmt := a.GetTextToLog()
   811  	if config.RedactLogEnabled() {
   812  		sessVars.PrevStmt = FormatALLEGROSQL(prevStmt, nil)
   813  	} else {
   814  		pps := types.CloneEvent(sessVars.PreparedParams)
   815  		sessVars.PrevStmt = FormatALLEGROSQL(prevStmt, pps)
   816  	}
   817  
   818  	executeDuration := time.Since(sessVars.StartTime) - sessVars.DurationCompile
   819  	if sessVars.InRestrictedALLEGROSQL {
   820  		stochastikInterDircuteRunDurationInternal.Observe(executeDuration.Seconds())
   821  	} else {
   822  		stochastikInterDircuteRunDurationGeneral.Observe(executeDuration.Seconds())
   823  	}
   824  }
   825  
   826  // CloseRecordSet will finish the execution of the current memex and do some recording work.
   827  func (a *InterDircStmt) CloseRecordSet(txnStartTS uint64, lastErr error) {
   828  	a.FinishInterDircuteStmt(txnStartTS, lastErr == nil, false)
   829  	a.logAudit()
   830  	// Detach the memory and disk trackers of the previous stmtCtx from GlobalMemoryUsageTracker and GlobalDiskUsageTracker.
   831  	if stmtCtx := a.Ctx.GetStochastikVars().StmtCtx; stmtCtx != nil {
   832  		if stmtCtx.DiskTracker != nil {
   833  			stmtCtx.DiskTracker.DetachFromGlobalTracker()
   834  		}
   835  		if stmtCtx.MemTracker != nil {
   836  			stmtCtx.MemTracker.DetachFromGlobalTracker()
   837  		}
   838  	}
   839  }
   840  
   841  // LogSlowQuery is used to print the slow query in the log files.
   842  func (a *InterDircStmt) LogSlowQuery(txnTS uint64, succ bool, hasMoreResults bool) {
   843  	sessVars := a.Ctx.GetStochastikVars()
   844  	level := log.GetLevel()
   845  	cfg := config.GetGlobalConfig()
   846  	costTime := time.Since(sessVars.StartTime) + sessVars.DurationParse
   847  	threshold := time.Duration(atomic.LoadUint64(&cfg.Log.SlowThreshold)) * time.Millisecond
   848  	enable := cfg.Log.EnableSlowLog
   849  	// if the level is Debug, print slow logs anyway
   850  	if (!enable || costTime < threshold) && level > zapembedded.DebugLevel {
   851  		return
   852  	}
   853  	var allegrosql stringutil.StringerFunc
   854  	normalizedALLEGROSQL, digest := sessVars.StmtCtx.ALLEGROSQLDigest()
   855  	if config.RedactLogEnabled() {
   856  		allegrosql = FormatALLEGROSQL(normalizedALLEGROSQL, nil)
   857  	} else if sensitiveStmt, ok := a.StmtNode.(ast.SensitiveStmtNode); ok {
   858  		allegrosql = FormatALLEGROSQL(sensitiveStmt.SecureText(), nil)
   859  	} else {
   860  		allegrosql = FormatALLEGROSQL(a.Text, sessVars.PreparedParams)
   861  	}
   862  
   863  	var blockIDs, indexNames string
   864  	if len(sessVars.StmtCtx.BlockIDs) > 0 {
   865  		blockIDs = strings.Replace(fmt.Sprintf("%v", sessVars.StmtCtx.BlockIDs), " ", ",", -1)
   866  	}
   867  	if len(sessVars.StmtCtx.IndexNames) > 0 {
   868  		indexNames = strings.Replace(fmt.Sprintf("%v", sessVars.StmtCtx.IndexNames), " ", ",", -1)
   869  	}
   870  	var stmtDetail execdetails.StmtInterDircDetails
   871  	stmtDetailRaw := a.GoCtx.Value(execdetails.StmtInterDircDetailKey)
   872  	if stmtDetailRaw != nil {
   873  		stmtDetail = *(stmtDetailRaw.(*execdetails.StmtInterDircDetails))
   874  	}
   875  	execDetail := sessVars.StmtCtx.GetInterDircDetails()
   876  	copTaskInfo := sessVars.StmtCtx.CausetTasksDetails()
   877  	statsInfos := causetembedded.GetStatsInfo(a.Causet)
   878  	memMax := sessVars.StmtCtx.MemTracker.MaxConsumed()
   879  	diskMax := sessVars.StmtCtx.DiskTracker.MaxConsumed()
   880  	_, planDigest := getCausetDigest(a.Ctx, a.Causet)
   881  	slowItems := &variable.SlowQueryLogItems{
   882  		TxnTS:                    txnTS,
   883  		ALLEGROALLEGROSQL:        allegrosql.String(),
   884  		Digest:                   digest,
   885  		TimeTotal:                costTime,
   886  		TimeParse:                sessVars.DurationParse,
   887  		TimeCompile:              sessVars.DurationCompile,
   888  		TimeOptimize:             sessVars.DurationOptimization,
   889  		TimeWaitTS:               sessVars.DurationWaitTS,
   890  		IndexNames:               indexNames,
   891  		StatsInfos:               statsInfos,
   892  		CausetTasks:              copTaskInfo,
   893  		InterDircDetail:          execDetail,
   894  		MemMax:                   memMax,
   895  		DiskMax:                  diskMax,
   896  		Succ:                     succ,
   897  		Causet:                   getCausetTree(a.Causet),
   898  		CausetDigest:             planDigest,
   899  		Prepared:                 a.isPreparedStmt,
   900  		HasMoreResults:           hasMoreResults,
   901  		CausetFromCache:          sessVars.FoundInCausetCache,
   902  		RewriteInfo:              sessVars.RewritePhaseInfo,
   903  		KVTotal:                  time.Duration(atomic.LoadInt64(&stmtDetail.WaitKVResFIDeluration)),
   904  		FIDelTotal:               time.Duration(atomic.LoadInt64(&stmtDetail.WaitFIDelResFIDeluration)),
   905  		BackoffTotal:             time.Duration(atomic.LoadInt64(&stmtDetail.BackoffDuration)),
   906  		WriteALLEGROSQLRespTotal: stmtDetail.WriteALLEGROSQLResFIDeluration,
   907  		InterDircRetryCount:      a.retryCount,
   908  	}
   909  	if a.retryCount > 0 {
   910  		slowItems.InterDircRetryTime = costTime - sessVars.DurationParse - sessVars.DurationCompile - time.Since(a.retryStartTime)
   911  	}
   912  	if _, ok := a.StmtNode.(*ast.CommitStmt); ok {
   913  		slowItems.PrevStmt = sessVars.PrevStmt.String()
   914  	}
   915  	if costTime < threshold {
   916  		logutil.SlowQueryLogger.Debug(sessVars.SlowLogFormat(slowItems))
   917  	} else {
   918  		logutil.SlowQueryLogger.Warn(sessVars.SlowLogFormat(slowItems))
   919  		metrics.TotalQueryProcHistogram.Observe(costTime.Seconds())
   920  		metrics.TotalCopProcHistogram.Observe(execDetail.ProcessTime.Seconds())
   921  		metrics.TotalCopWaitHistogram.Observe(execDetail.WaitTime.Seconds())
   922  		var userString string
   923  		if sessVars.User != nil {
   924  			userString = sessVars.User.String()
   925  		}
   926  		petri.GetPetri(a.Ctx).LogSlowQuery(&petri.SlowQueryInfo{
   927  			ALLEGROALLEGROSQL: allegrosql.String(),
   928  			Digest:            digest,
   929  			Start:             sessVars.StartTime,
   930  			Duration:          costTime,
   931  			Detail:            sessVars.StmtCtx.GetInterDircDetails(),
   932  			Succ:              succ,
   933  			ConnID:            sessVars.ConnectionID,
   934  			TxnTS:             txnTS,
   935  			User:              userString,
   936  			EDB:               sessVars.CurrentDB,
   937  			BlockIDs:          blockIDs,
   938  			IndexNames:        indexNames,
   939  			Internal:          sessVars.InRestrictedALLEGROSQL,
   940  		})
   941  	}
   942  }
   943  
   944  // getCausetTree will try to get the select plan tree if the plan is a select, or the select plan of a delete/update/insert memex.
   945  func getCausetTree(p causetembedded.Causet) string {
   946  	cfg := config.GetGlobalConfig()
   947  	if atomic.LoadUint32(&cfg.Log.RecordCausetInSlowLog) == 0 {
   948  		return ""
   949  	}
   950  	planTree := causetembedded.EncodeCauset(p)
   951  	if len(planTree) == 0 {
   952  		return planTree
   953  	}
   954  	return variable.SlowLogCausetPrefix + planTree + variable.SlowLogCausetSuffix
   955  }
   956  
   957  // getCausetDigest returns the normalized plan and its digest, computing and caching them in the StmtCtx when they are not already set.
   958  func getCausetDigest(sctx stochastikctx.Context, p causetembedded.Causet) (normalized, planDigest string) {
   959  	normalized, planDigest = sctx.GetStochastikVars().StmtCtx.GetCausetDigest()
   960  	if len(normalized) > 0 {
   961  		return
   962  	}
   963  	normalized, planDigest = causetembedded.NormalizeCauset(p)
   964  	sctx.GetStochastikVars().StmtCtx.SetCausetDigest(normalized, planDigest)
   965  	return
   966  }
   967  
   968  // SummaryStmt collects memexs for information_schema.memexs_summary.
   969  func (a *InterDircStmt) SummaryStmt(succ bool) {
   970  	sessVars := a.Ctx.GetStochastikVars()
   971  	var userString string
   972  	if sessVars.User != nil {
   973  		userString = sessVars.User.Username
   974  	}
   975  
   976  	// Internal ALLEGROSQLs must also be recorded to keep the consistency of `PrevStmt` and `PrevStmtDigest`.
   977  	if !stmtsummary.StmtSummaryByDigestMap.Enabled() || ((sessVars.InRestrictedALLEGROSQL || len(userString) == 0) && !stmtsummary.StmtSummaryByDigestMap.EnabledInternal()) {
   978  		sessVars.SetPrevStmtDigest("")
   979  		return
   980  	}
   981  	// Ignore `PREPARE` memexs, but record `EXECUTE` memexs.
   982  	if _, ok := a.StmtNode.(*ast.PrepareStmt); ok {
   983  		return
   984  	}
   985  	stmtCtx := sessVars.StmtCtx
   986  	normalizedALLEGROSQL, digest := stmtCtx.ALLEGROSQLDigest()
   987  	costTime := time.Since(sessVars.StartTime) + sessVars.DurationParse
   988  
   989  	var prevALLEGROSQL, prevALLEGROSQLDigest string
   990  	if _, ok := a.StmtNode.(*ast.CommitStmt); ok {
   991  		// If prevALLEGROSQLDigest is not recorded, it means this `commit` is the first ALLEGROSQL since the stmt summary was enabled,
   992  		// so it's OK just to ignore it.
   993  		if prevALLEGROSQLDigest = sessVars.GetPrevStmtDigest(); len(prevALLEGROSQLDigest) == 0 {
   994  			return
   995  		}
   996  		prevALLEGROSQL = sessVars.PrevStmt.String()
   997  	}
   998  	sessVars.SetPrevStmtDigest(digest)
   999  
  1000  	// No need to encode every time, so encode lazily.
  1001  	planGenerator := func() string {
  1002  		return causetembedded.EncodeCauset(a.Causet)
  1003  	}
  1004  	// Generating the plan digest is slow, so for 'Point_Get' plans it is generated lazily, only when needed.
  1005  	// For a point get, different ALLEGROSQLs lead to different plans, so the ALLEGROSQL digest
  1006  	// is enough to distinguish the plans in this case.
  1007  	var planDigest string
  1008  	var planDigestGen func() string
  1009  	if a.Causet.TP() == plancodec.TypePointGet {
  1010  		planDigestGen = func() string {
  1011  			_, planDigest := getCausetDigest(a.Ctx, a.Causet)
  1012  			return planDigest
  1013  		}
  1014  	} else {
  1015  		_, planDigest = getCausetDigest(a.Ctx, a.Causet)
  1016  	}
  1017  
  1018  	execDetail := stmtCtx.GetInterDircDetails()
  1019  	copTaskInfo := stmtCtx.CausetTasksDetails()
  1020  	memMax := stmtCtx.MemTracker.MaxConsumed()
  1021  	diskMax := stmtCtx.DiskTracker.MaxConsumed()
  1022  	allegrosql := a.GetTextToLog()
  1023  	stmtInterDircInfo := &stmtsummary.StmtInterDircInfo{
  1024  		SchemaName:           strings.ToLower(sessVars.CurrentDB),
  1025  		OriginalALLEGROSQL:   allegrosql,
  1026  		NormalizedALLEGROSQL: normalizedALLEGROSQL,
  1027  		Digest:               digest,
  1028  		PrevALLEGROSQL:       prevALLEGROSQL,
  1029  		PrevALLEGROSQLDigest: prevALLEGROSQLDigest,
  1030  		CausetGenerator:      planGenerator,
  1031  		CausetDigest:         planDigest,
  1032  		CausetDigestGen:      planDigestGen,
  1033  		User:                 userString,
  1034  		TotalLatency:         costTime,
  1035  		ParseLatency:         sessVars.DurationParse,
  1036  		CompileLatency:       sessVars.DurationCompile,
  1037  		StmtCtx:              stmtCtx,
  1038  		CausetTasks:          copTaskInfo,
  1039  		InterDircDetail:      &execDetail,
  1040  		MemMax:               memMax,
  1041  		DiskMax:              diskMax,
  1042  		StartTime:            sessVars.StartTime,
  1043  		IsInternal:           sessVars.InRestrictedALLEGROSQL,
  1044  		Succeed:              succ,
  1045  		CausetInCache:        sessVars.FoundInCausetCache,
  1046  		InterDircRetryCount:  a.retryCount,
  1047  	}
  1048  	if a.retryCount > 0 {
  1049  		stmtInterDircInfo.InterDircRetryTime = costTime - sessVars.DurationParse - sessVars.DurationCompile - time.Since(a.retryStartTime)
  1050  	}
  1051  	stmtsummary.StmtSummaryByDigestMap.AddStatement(stmtInterDircInfo)
  1052  }
  1053  
  1054  // GetTextToLog returns the query text to log.
  1055  func (a *InterDircStmt) GetTextToLog() string {
  1056  	var allegrosql string
  1057  	if config.RedactLogEnabled() {
  1058  		allegrosql, _ = a.Ctx.GetStochastikVars().StmtCtx.ALLEGROSQLDigest()
  1059  	} else if sensitiveStmt, ok := a.StmtNode.(ast.SensitiveStmtNode); ok {
  1060  		allegrosql = sensitiveStmt.SecureText()
  1061  	} else {
  1062  		allegrosql = a.Text
  1063  	}
  1064  	return allegrosql
  1065  }