github.com/matrixorigin/matrixone@v1.2.0/pkg/sql/colexec/lockop/lock_op.go

// Copyright 2023 Matrix Origin
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package lockop

import (
	"bytes"
	"context"
	"fmt"
	"strings"
	"time"

	"github.com/matrixorigin/matrixone/pkg/common/moerr"
	"github.com/matrixorigin/matrixone/pkg/common/reuse"
	"github.com/matrixorigin/matrixone/pkg/container/batch"
	"github.com/matrixorigin/matrixone/pkg/container/types"
	"github.com/matrixorigin/matrixone/pkg/container/vector"
	"github.com/matrixorigin/matrixone/pkg/lockservice"
	"github.com/matrixorigin/matrixone/pkg/pb/lock"
	"github.com/matrixorigin/matrixone/pkg/pb/pipeline"
	"github.com/matrixorigin/matrixone/pkg/pb/timestamp"
	"github.com/matrixorigin/matrixone/pkg/sql/plan"
	"github.com/matrixorigin/matrixone/pkg/txn/client"
	"github.com/matrixorigin/matrixone/pkg/txn/trace"
	"github.com/matrixorigin/matrixone/pkg/vm"
	"github.com/matrixorigin/matrixone/pkg/vm/engine"
	"github.com/matrixorigin/matrixone/pkg/vm/process"
	"go.uber.org/zap"
)

var (
	retryError               = moerr.NewTxnNeedRetryNoCtx()
	retryWithDefChangedError = moerr.NewTxnNeedRetryWithDefChangedNoCtx()
)

const argName = "lock_op"

func (arg *Argument) String(buf *bytes.Buffer) {
	buf.WriteString(argName)
	buf.WriteString(": lock-op(")
	n := len(arg.targets) - 1
	for idx, target := range arg.targets {
		buf.WriteString(fmt.Sprintf("%d-%d-%d",
			target.tableID,
			target.primaryColumnIndexInBatch,
			target.refreshTimestampIndexInBatch))
		if idx < n {
			buf.WriteString(",")
		}
	}
	buf.WriteString(")")
}

func (arg *Argument) Prepare(proc *process.Process) error {
	arg.rt = &state{}
	arg.rt.fetchers = make([]FetchLockRowsFunc, 0, len(arg.targets))
	for idx := range arg.targets {
		arg.rt.fetchers = append(arg.rt.fetchers,
			GetFetchRowsFunc(arg.targets[idx].primaryColumnType))
	}
	arg.rt.parker = types.NewPacker(proc.Mp())
	arg.rt.retryError = nil
	arg.rt.step = stepLock
	if arg.block {
		arg.rt.InitReceiver(proc, true)
	}
	return nil
}

// Call adds locks to the lockservice for the tables operated on by the
// current transaction. It only takes effect under a pessimistic transaction.
//
// Under RC isolation, after locking succeeds, if the accessed data is found
// to have been concurrently modified by another transaction, a Timestamp
// column is appended to the output vectors so the latest data can be queried;
// downstream operators must check this column to decide whether the latest
// data needs to be re-read.
func (arg *Argument) Call(proc *process.Process) (vm.CallResult, error) {
	if err, isCancel := vm.CancelCheck(proc); isCancel {
		return vm.CancelResult, err
	}

	txnOp := proc.TxnOperator
	if !txnOp.Txn().IsPessimistic() {
		return arg.GetChildren(0).Call(proc)
	}

	if !arg.block {
		return callNonBlocking(proc, arg)
	}

	return callBlocking(proc, arg, arg.GetIsFirst(), arg.GetIsLast())
}
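
// How a downstream operator might consume that Timestamp column, sketched
// (illustrative only: bat, target and rowIdx come from the reader's context,
// and types.TS is the value performLock appends below):
//
//	tsVec := bat.GetVector(target.refreshTimestampIndexInBatch)
//	refresh := vector.MustFixedCol[types.TS](tsVec)[rowIdx]
//	// if refresh is newer than the operator's current view, re-read at refresh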

func callNonBlocking(
	proc *process.Process,
	arg *Argument) (vm.CallResult, error) {

	result, err := arg.GetChildren(0).Call(proc)
	if err != nil {
		return result, err
	}

	anal := proc.GetAnalyze(arg.GetIdx(), arg.GetParallelIdx(), arg.GetParallelMajor())
	anal.Start()
	defer anal.Stop()

	if result.Batch == nil {
		return result, arg.rt.retryError
	}
	bat := result.Batch
	if bat.IsEmpty() {
		return result, err
	}

	if err := performLock(bat, proc, arg); err != nil {
		return result, err
	}

	return result, nil
}

func callBlocking(
	proc *process.Process,
	arg *Argument,
	isFirst bool,
	_ bool) (vm.CallResult, error) {

	anal := proc.GetAnalyze(arg.GetIdx(), arg.GetParallelIdx(), arg.GetParallelMajor())
	anal.Start()
	defer anal.Stop()

	result := vm.NewCallResult()
	if arg.rt.step == stepLock {
		for {
			bat, err := arg.getBatch(proc, anal, isFirst)
			if err != nil {
				return result, err
			}

			// no more input batches, which means all locks have been performed
			if bat == nil {
				arg.rt.step = stepDownstream
				if len(arg.rt.cachedBatches) == 0 {
					arg.rt.step = stepEnd
				}
				break
			}

			// skip empty batch
			if bat.IsEmpty() {
				continue
			}

			if err := performLock(bat, proc, arg); err != nil {
				return result, err
			}

			// blocking lock node. Never pass an input batch to downstream
			// operators before all locks are performed.
			arg.rt.cachedBatches = append(arg.rt.cachedBatches, bat)
		}
	}

	if arg.rt.step == stepDownstream {
		if arg.rt.retryError != nil {
			arg.rt.step = stepEnd
			return result, arg.rt.retryError
		}

		if len(arg.rt.cachedBatches) == 0 {
			arg.rt.step = stepEnd
		} else {
			bat := arg.rt.cachedBatches[0]
			arg.rt.cachedBatches = arg.rt.cachedBatches[1:]
			result.Batch = bat
			return result, nil
		}
	}

	if arg.rt.step == stepEnd {
		result.Status = vm.ExecStop
		arg.cleanCachedBatch(proc)
		return result, arg.rt.retryError
	}

	panic("BUG")
}
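
// The blocking path above is a small state machine over arg.rt.step; a sketch
// of the transitions (the step* constants are defined elsewhere in this
// package):
//
//	stepLock       -> stepDownstream  all inputs locked, cached batches remain
//	stepLock       -> stepEnd         all inputs locked, nothing cached
//	stepDownstream -> stepEnd         cache drained, or a retry error pending
//	stepEnd                           emit vm.ExecStop, surface any retry error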

func performLock(
	bat *batch.Batch,
	proc *process.Process,
	arg *Argument) error {
	needRetry := false
	for idx, target := range arg.targets {
		if proc.TxnOperator.LockSkipped(target.tableID, target.mode) {
			return nil
		}
		getLogger().Debug("lock",
			zap.Uint64("table", target.tableID),
			zap.Bool("filter", target.filter != nil),
			zap.Int32("filter-col", target.filterColIndexInBatch),
			zap.Int32("primary-index", target.primaryColumnIndexInBatch))
		var filterCols []int32
		priVec := bat.GetVector(target.primaryColumnIndexInBatch)
		// For partitioned tables, filter is not nil
		if target.filter != nil {
			filterCols = vector.MustFixedCol[int32](bat.GetVector(target.filterColIndexInBatch))
			for _, value := range filterCols {
				// -1 marks an illegal partition index
				if value == -1 {
					return moerr.NewInvalidInput(proc.Ctx, "Table has no partition for value from column_list")
				}
			}
		}
		locked, defChanged, refreshTS, err := doLock(
			proc.Ctx,
			arg.engine,
			nil,
			target.tableID,
			proc,
			priVec,
			target.primaryColumnType,
			DefaultLockOptions(arg.rt.parker).
				WithLockMode(lock.LockMode_Exclusive).
				WithFetchLockRowsFunc(arg.rt.fetchers[idx]).
				WithMaxBytesPerLock(int(proc.LockService.GetConfig().MaxLockRowCount)).
				WithFilterRows(target.filter, filterCols).
				WithLockTable(target.lockTable, target.changeDef).
				WithHasNewVersionInRangeFunc(arg.rt.hasNewVersionInRange),
		)
		if getLogger().Enabled(zap.DebugLevel) {
			getLogger().Debug("lock result",
				zap.Uint64("table", target.tableID),
				zap.Bool("locked", locked),
				zap.Int32("primary-index", target.primaryColumnIndexInBatch),
				zap.String("refresh-ts", refreshTS.DebugString()),
				zap.Error(err))
		}
		if err != nil {
			return err
		}
		if !locked {
			continue
		}

		// refreshTS is the last commit ts + 1, because we need to see the committed data.
		if proc.TxnClient.RefreshExpressionEnabled() &&
			target.refreshTimestampIndexInBatch != -1 {
			vec := bat.GetVector(target.refreshTimestampIndexInBatch)
			ts := types.BuildTS(refreshTS.PhysicalTime, refreshTS.LogicalTime)
			n := priVec.Length()
			for i := 0; i < n; i++ {
				vector.AppendFixed(vec, ts, false, proc.Mp())
			}
			continue
		}

		// If a retry is needed, do not return the retry error immediately;
		// first try to acquire all the locks to avoid another conflict when retrying.
		if !needRetry && !refreshTS.IsEmpty() {
			needRetry = true
		}
		if !arg.rt.defChanged {
			arg.rt.defChanged = defChanged
		}
	}
	// When a transaction operates on a lot of data, there may be multiple
	// conflicts on that data, and retrying on every single conflict would just
	// run into further conflicts. Instead, return the conflict only after all
	// the locks have been added successfully, so the retry is guaranteed to
	// succeed because every lock is already held.
	if needRetry && arg.rt.retryError == nil {
		arg.rt.retryError = retryError
	}
	if arg.rt.defChanged {
		arg.rt.retryError = retryWithDefChangedError
	}
	return nil
}

// LockTable locks a table: all rows in the table are locked, and other
// transactions wait until the current txn is closed.
func LockTable(
	eng engine.Engine,
	proc *process.Process,
	tableID uint64,
	pkType types.Type,
	changeDef bool) error {
	txnOp := proc.TxnOperator
	if !txnOp.Txn().IsPessimistic() {
		return nil
	}
	parker := types.NewPacker(proc.Mp())
	defer parker.FreeMem()

	opts := DefaultLockOptions(parker).
		WithLockTable(true, changeDef).
		WithFetchLockRowsFunc(GetFetchRowsFunc(pkType))
	_, defChanged, refreshTS, err := doLock(
		proc.Ctx,
		eng,
		nil,
		tableID,
		proc,
		nil,
		pkType,
		opts)
	if err != nil {
		return err
	}
	// If the returned timestamp is not empty, we should return a retry error.
	if !refreshTS.IsEmpty() {
		if !defChanged {
			return retryError
		}
		return retryWithDefChangedError
	}
	return nil
}
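
// A minimal caller sketch (the table id 1001 and the pk type are illustrative;
// eng and proc come from the caller's context):
//
//	// lock the whole table before e.g. a truncate; no definition change
//	if err := LockTable(eng, proc, 1001, types.T_int64.ToType(), false); err != nil {
//		return err // may be retryError or retryWithDefChangedError
//	}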

// LockRows locks the given rows in the table; other transactions touching
// these rows wait until the current txn is closed.
func LockRows(
	eng engine.Engine,
	proc *process.Process,
	rel engine.Relation,
	tableID uint64,
	vec *vector.Vector,
	pkType types.Type,
	lockMode lock.LockMode,
	sharding lock.Sharding,
	group uint32,
) error {
	txnOp := proc.TxnOperator
	if !txnOp.Txn().IsPessimistic() {
		return nil
	}

	parker := types.NewPacker(proc.Mp())
	defer parker.FreeMem()

	opts := DefaultLockOptions(parker).
		WithLockTable(false, false).
		WithLockSharding(sharding).
		WithLockMode(lockMode).
		WithLockGroup(group).
		WithFetchLockRowsFunc(GetFetchRowsFunc(pkType))
	_, defChanged, refreshTS, err := doLock(
		proc.Ctx,
		eng,
		rel,
		tableID,
		proc,
		vec,
		pkType,
		opts)
	if err != nil {
		return err
	}
	// If the returned timestamp is not empty, we should return a retry error.
	if !refreshTS.IsEmpty() {
		if !defChanged {
			return retryError
		}
		return retryWithDefChangedError
	}
	return nil
}
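
// A minimal caller sketch (all values illustrative; the vector holds the
// primary keys to lock and must match pkType):
//
//	pks := vector.NewVec(types.T_int64.ToType())
//	_ = vector.AppendFixed(pks, int64(42), false, proc.Mp())
//	err := LockRows(eng, proc, rel, 1001, pks, types.T_int64.ToType(),
//		lock.LockMode_Shared, lock.Sharding_None, 0)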

// doLock locks a set of data so that no other transaction can modify it.
// The data is described by the primary key. When the returned timestamp.IsEmpty
// is false, it means there is a conflict with other transactions and the data to
// be manipulated has been modified; you need to get the latest data at that timestamp.
func doLock(
	ctx context.Context,
	eng engine.Engine,
	rel engine.Relation,
	tableID uint64,
	proc *process.Process,
	vec *vector.Vector,
	pkType types.Type,
	opts LockOptions) (bool, bool, timestamp.Timestamp, error) {
	txnOp := proc.TxnOperator
	txnClient := proc.TxnClient
	lockService := proc.LockService

	if !txnOp.Txn().IsPessimistic() {
		return false, false, timestamp.Timestamp{}, nil
	}

	seq := txnOp.NextSequence()
	startAt := time.Now()
	trace.GetService().AddTxnDurationAction(
		txnOp,
		client.LockEvent,
		seq,
		tableID,
		0,
		nil)

	// In this case:
	//   create table t1 (a int primary key, b int, c int, unique key(b,c));
	//   insert into t1 values (1,1,null);
	//   update t1 set b = b+1 where a = 1;
	// MO will use 't1 left join hidden_tbl' to fetch the PK in the hidden
	// table to lock, but the result will be a ConstNull vector.
	if vec != nil && vec.IsConstNull() {
		return false, false, timestamp.Timestamp{}, nil
	}

	if opts.maxCountPerLock == 0 {
		opts.maxCountPerLock = int(lockService.GetConfig().MaxLockRowCount)
	}
	fetchFunc := opts.fetchFunc
	if fetchFunc == nil {
		fetchFunc = GetFetchRowsFunc(pkType)
	}

	has, rows, g := fetchFunc(
		vec,
		opts.parker,
		pkType,
		opts.maxCountPerLock,
		opts.lockTable,
		opts.filter,
		opts.filterCols)
	if !has {
		return false, false, timestamp.Timestamp{}, nil
	}

	txn := txnOp.Txn()
	options := lock.LockOptions{
		Granularity:     g,
		Policy:          proc.WaitPolicy,
		Mode:            opts.mode,
		TableDefChanged: opts.changeDef,
		Sharding:        opts.sharding,
		Group:           opts.group,
		SnapShotTs:      txnOp.CreateTS(),
	}
	if txn.Mirror {
		options.ForwardTo = txn.LockService
		if options.ForwardTo == "" {
			panic("forward to empty lock service")
		}
	} else {
		// FIXME: in launch mode, multiple CNs share the same process-level
		// runtime, so the lockservice may be the wrong one.
		if txn.LockService != lockService.GetServiceID() {
			lockService = lockservice.GetLockServiceByServiceID(txn.LockService)
		}
	}

	key := txnOp.AddWaitLock(tableID, rows, options)
	defer txnOp.RemoveWaitLock(key)

	var err error
	var result lock.Result
	for {
		result, err = lockService.Lock(
			ctx,
			tableID,
			rows,
			txn.ID,
			options)
		if !canRetryLock(txnOp, err) {
			break
		}
	}
	if err != nil {
		return false, false, timestamp.Timestamp{}, err
	}

	if len(result.ConflictKey) > 0 {
		trace.GetService().AddTxnActionInfo(
			txnOp,
			client.LockEvent,
			seq,
			tableID,
			func(writer trace.Writer) {
				writer.WriteHex(result.ConflictKey)
				writer.WriteString(":")
				writer.WriteHex(result.ConflictTxn)
				writer.WriteString("/")
				writer.WriteUint(uint64(result.Waiters))
				if len(result.PrevWaiter) > 0 {
					writer.WriteString("/")
					writer.WriteHex(result.PrevWaiter)
				}
			},
		)
	}

	trace.GetService().AddTxnDurationAction(
		txnOp,
		client.LockEvent,
		seq,
		tableID,
		time.Since(startAt),
		nil)

	// add bind locks
	if err = txnOp.AddLockTable(result.LockedOn); err != nil {
		return false, false, timestamp.Timestamp{}, err
	}

	snapshotTS := txnOp.Txn().SnapshotTS
	// if there is no conflict, lockedTS is the latest commit ts of this table
	lockedTS := result.Timestamp

	// Even without a conflict, data may have been updated in
	// [snapshotTS, lockedTS], so we need to check here.
	if !result.HasConflict &&
		snapshotTS.LessEq(lockedTS) && // only retry when snapshotTS <= lockedTS, i.e. some updates may be lost in RC mode
		!txnOp.IsRetry() &&
		txnOp.Txn().IsRCIsolation() {

		// wait for the last committed logtail to be applied
		newSnapshotTS, err := txnClient.WaitLogTailAppliedAt(ctx, lockedTS)
		if err != nil {
			return false, false, timestamp.Timestamp{}, err
		}

		fn := opts.hasNewVersionInRangeFunc
		if fn == nil {
			fn = hasNewVersionInRange
		}

		// if data in [snapshotTS, newSnapshotTS] has been modified, retry at the new snapshot ts
		changed, err := fn(proc, rel, tableID, eng, vec, snapshotTS, newSnapshotTS)
		if err != nil {
			return false, false, timestamp.Timestamp{}, err
		}

		if changed {
			trace.GetService().TxnNoConflictChanged(
				proc.TxnOperator,
				tableID,
				lockedTS,
				newSnapshotTS)
			if err := txnOp.UpdateSnapshot(ctx, newSnapshotTS); err != nil {
				return false, false, timestamp.Timestamp{}, err
			}
			return true, false, newSnapshotTS, nil
		}
	}

	// Either there is no conflict, or there is a conflict but all previous
	// txns aborted; the current txn can read and write normally.
	if !result.HasConflict ||
		!result.HasPrevCommit {
		return true, false, timestamp.Timestamp{}, nil
	} else if lockedTS.Less(snapshotTS) {
		return true, false, timestamp.Timestamp{}, nil
	}

	// Arriving here means that at least one of the conflicting
	// transactions has committed.
	//
	// Under RC isolation we need some retries between
	// [txn.snapshot ts, prev.commit ts] (de-duplication for insert, re-query for
	// update and delete).
	//
	// Under SI isolation the current transaction needs to be aborted (TODO: later
	// we can consider recording the ReadSet of the transaction, checking whether
	// data was modified between [snapshotTS, prev.commit ts], and raising the
	// SnapshotTS of the SI transaction to eliminate the conflict).
	if !txnOp.Txn().IsRCIsolation() {
		return false, false, timestamp.Timestamp{}, moerr.NewTxnWWConflict(ctx, tableID, "SI not support retry")
	}

	// forward RC's snapshot ts
	snapshotTS = result.Timestamp.Next()

	trace.GetService().TxnConflictChanged(
		proc.TxnOperator,
		tableID,
		snapshotTS)
	if err := txnOp.UpdateSnapshot(ctx, snapshotTS); err != nil {
		return false, false, timestamp.Timestamp{}, err
	}
	return true, result.TableDefChanged, snapshotTS, nil
}
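
// How callers interpret doLock's four return values, sketched (LockTable and
// LockRows above are real call sites):
//
//	locked, defChanged, refreshTS, err := doLock(...)
//	// err != nil          -> locking failed, bubble the error up
//	// !locked             -> nothing was locked (optimistic txn, const-null pk, no rows)
//	// refreshTS.IsEmpty() -> no committed conflicting write, proceed normally
//	// otherwise           -> data changed: retry at refreshTS; defChanged selects
//	//                        retryWithDefChangedError over retryError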

func canRetryLock(txn client.TxnOperator, err error) bool {
	if moerr.IsMoErrCode(err, moerr.ErrRetryForCNRollingRestart) {
		return true
	}
	// only a bind-changed or table-not-found error is retryable
	if !moerr.IsMoErrCode(err, moerr.ErrLockTableBindChanged) &&
		!moerr.IsMoErrCode(err, moerr.ErrLockTableNotFound) {
		return false
	}
	if txn.LockTableCount() == 0 {
		return true
	}
	return false
}

// DefaultLockOptions creates a default LockOptions. The parker is used to
// encode primary keys into lock rows.
func DefaultLockOptions(parker *types.Packer) LockOptions {
	return LockOptions{
		mode:            lock.LockMode_Exclusive,
		lockTable:       false,
		maxCountPerLock: 0,
		parker:          parker,
	}
}
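
// LockOptions is a value type and every With* setter below returns a modified
// copy, so a base value can be shared safely; a small sketch:
//
//	base := DefaultLockOptions(parker)
//	rowOpts := base.WithLockMode(lock.LockMode_Shared) // base is unchanged
//	tblOpts := base.WithLockTable(true, false)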

// WithLockSharding sets the lock sharding.
func (opts LockOptions) WithLockSharding(sharding lock.Sharding) LockOptions {
	opts.sharding = sharding
	return opts
}

// WithLockGroup sets the lock group.
func (opts LockOptions) WithLockGroup(group uint32) LockOptions {
	opts.group = group
	return opts
}

// WithLockMode sets the lock mode, Exclusive or Shared.
func (opts LockOptions) WithLockMode(mode lock.LockMode) LockOptions {
	opts.mode = mode
	return opts
}

// WithLockTable sets whether to lock the whole table.
func (opts LockOptions) WithLockTable(lockTable, changeDef bool) LockOptions {
	opts.lockTable = lockTable
	opts.changeDef = changeDef
	return opts
}

// WithMaxBytesPerLock limits memory per lock operation. Every lock operation
// adds some lock rows into the lockservice; if very many rows are added at
// once, this can result in an excessive memory footprint. This value limits
// the amount of lock memory that can be allocated per lock operation, and if
// it is exceeded, the lock is converted to a range lock.
func (opts LockOptions) WithMaxBytesPerLock(maxBytesPerLock int) LockOptions {
	opts.maxCountPerLock = maxBytesPerLock
	return opts
}

// WithFetchLockRowsFunc sets the function that converts primary keys into
// lock rows.
func (opts LockOptions) WithFetchLockRowsFunc(fetchFunc FetchLockRowsFunc) LockOptions {
	opts.fetchFunc = fetchFunc
	return opts
}

// WithFilterRows sets the row filter; filterCols is passed to the RowsFilter
// func.
func (opts LockOptions) WithFilterRows(
	filter RowsFilter,
	filterCols []int32) LockOptions {
	opts.filter = filter
	opts.filterCols = filterCols
	return opts
}

// WithHasNewVersionInRangeFunc sets the hasNewVersionInRange func.
func (opts LockOptions) WithHasNewVersionInRangeFunc(fn hasNewVersionInRangeFunc) LockOptions {
	opts.hasNewVersionInRangeFunc = fn
	return opts
}

// NewArgumentByEngine creates a new lock op argument bound to the given engine.
func NewArgumentByEngine(engine engine.Engine) *Argument {
	arg := reuse.Alloc[Argument](nil)
	arg.engine = engine
	return arg
}

// Block returns whether the lock operator is a blocking node.
func (arg *Argument) Block() bool {
	return arg.block
}

// SetBlock sets whether the lock op blocks. If true, the lock op blocks the
// current pipeline and caches all input batches, waiting until every input
// batch has been locked before handing the cached batches to the downstream
// operator. E.g. for SELECT ... FOR UPDATE, the select can only be performed
// once all lock results are in; otherwise, if a retry is needed in RC mode,
// we may get a wrong result.
func (arg *Argument) SetBlock(block bool) *Argument {
	arg.block = block
	return arg
}
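
// A sketch of the SELECT ... FOR UPDATE case described above (eng comes from
// the caller; the table id and column indexes are illustrative):
//
//	arg := NewArgumentByEngine(eng).
//		AddLockTarget(1001, 0, types.T_int64.ToType(), -1).
//		SetBlock(true) // hold batches until every lock is acquired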

// CopyToPipelineTarget converts the lock targets into pipeline.LockTarget
// specs.
func (arg *Argument) CopyToPipelineTarget() []*pipeline.LockTarget {
	targets := make([]*pipeline.LockTarget, len(arg.targets))
	for i, target := range arg.targets {
		targets[i] = &pipeline.LockTarget{
			TableId:            target.tableID,
			PrimaryColIdxInBat: target.primaryColumnIndexInBatch,
			PrimaryColTyp:      plan.MakePlan2Type(&target.primaryColumnType),
			RefreshTsIdxInBat:  target.refreshTimestampIndexInBatch,
			FilterColIdxInBat:  target.filterColIndexInBatch,
			LockTable:          target.lockTable,
			ChangeDef:          target.changeDef,
			Mode:               target.mode,
		}
	}
	return targets
}

// AddLockTarget adds a lock target; LockMode_Exclusive will be used.
func (arg *Argument) AddLockTarget(
	tableID uint64,
	primaryColumnIndexInBatch int32,
	primaryColumnType types.Type,
	refreshTimestampIndexInBatch int32) *Argument {
	return arg.AddLockTargetWithMode(
		tableID,
		lock.LockMode_Exclusive,
		primaryColumnIndexInBatch,
		primaryColumnType,
		refreshTimestampIndexInBatch)
}

// AddLockTargetWithMode adds a lock target with the given lock mode.
func (arg *Argument) AddLockTargetWithMode(
	tableID uint64,
	mode lock.LockMode,
	primaryColumnIndexInBatch int32,
	primaryColumnType types.Type,
	refreshTimestampIndexInBatch int32) *Argument {
	arg.targets = append(arg.targets, lockTarget{
		tableID:                      tableID,
		primaryColumnIndexInBatch:    primaryColumnIndexInBatch,
		primaryColumnType:            primaryColumnType,
		refreshTimestampIndexInBatch: refreshTimestampIndexInBatch,
		mode:                         mode,
	})
	return arg
}
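
// Sketch: registering two targets on one lock op, one shared and one
// exclusive (ids, column indexes and types are illustrative):
//
//	arg.AddLockTargetWithMode(1001, lock.LockMode_Shared, 0, types.T_int64.ToType(), -1)
//	arg.AddLockTarget(1002, 1, types.T_varchar.ToType(), 2) // exclusive by default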

// LockTable marks the matching target as a whole-table lock; used for delete,
// truncate and drop table.
func (arg *Argument) LockTable(
	tableID uint64,
	changeDef bool) *Argument {
	return arg.LockTableWithMode(
		tableID,
		lock.LockMode_Exclusive,
		changeDef)
}

// LockTableWithMode is similar to LockTable, but with a specified lock mode.
func (arg *Argument) LockTableWithMode(
	tableID uint64,
	mode lock.LockMode,
	changeDef bool) *Argument {
	for idx := range arg.targets {
		if arg.targets[idx].tableID == tableID {
			arg.targets[idx].lockTable = true
			arg.targets[idx].changeDef = changeDef
			arg.targets[idx].mode = mode
			break
		}
	}
	return arg
}

// AddLockTargetWithPartition adds lock targets for partitioned tables. Our
// partitioned table implementation treats each partition as a separate table,
// so when modifying data, the rows may belong to different partitions. Since
// the lock op does not care about how data maps to partitions, the caller
// needs to tell it.
//
// tableIDs: the set of ids of the partition sub-tables to which the data of
// the current operation belongs after the mapping is computed.
//
// partitionTableIDMappingInBatch: the batch column holding, per row, the
// index of the corresponding sub-table within tableIDs.
func (arg *Argument) AddLockTargetWithPartition(
	tableIDs []uint64,
	primaryColumnIndexInBatch int32,
	primaryColumnType types.Type,
	refreshTimestampIndexInBatch int32,
	partitionTableIDMappingInBatch int32) *Argument {
	return arg.AddLockTargetWithPartitionAndMode(
		tableIDs,
		lock.LockMode_Exclusive,
		primaryColumnIndexInBatch,
		primaryColumnType,
		refreshTimestampIndexInBatch,
		partitionTableIDMappingInBatch)
}

// AddLockTargetWithPartitionAndMode is similar to AddLockTargetWithPartition,
// but you can specify the lock mode.
func (arg *Argument) AddLockTargetWithPartitionAndMode(
	tableIDs []uint64,
	mode lock.LockMode,
	primaryColumnIndexInBatch int32,
	primaryColumnType types.Type,
	refreshTimestampIndexInBatch int32,
	partitionTableIDMappingInBatch int32) *Argument {
	if len(tableIDs) == 0 {
		panic("invalid partition table ids")
	}

	// only one partition sub-table; process it as a normal table
	if len(tableIDs) == 1 {
		return arg.AddLockTarget(tableIDs[0],
			primaryColumnIndexInBatch,
			primaryColumnType,
			refreshTimestampIndexInBatch,
		)
	}

	for _, tableID := range tableIDs {
		arg.targets = append(arg.targets, lockTarget{
			tableID:                      tableID,
			primaryColumnIndexInBatch:    primaryColumnIndexInBatch,
			primaryColumnType:            primaryColumnType,
			refreshTimestampIndexInBatch: refreshTimestampIndexInBatch,
			filter:                       getRowsFilter(tableID, tableIDs),
			filterColIndexInBatch:        partitionTableIDMappingInBatch,
			mode:                         mode,
		})
	}
	return arg
}
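
// Sketch: three partition sub-tables whose per-row mapping lives in batch
// column 3 (all values illustrative):
//
//	arg.AddLockTargetWithPartition(
//		[]uint64{2001, 2002, 2003}, // partition sub-table ids
//		0,                          // pk column index in the batch
//		types.T_int64.ToType(),
//		-1,                         // no refresh-timestamp column
//		3)                          // column holding each row's index into tableIDs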

// Free releases the resources held by the operator.
func (arg *Argument) Free(proc *process.Process, pipelineFailed bool, err error) {
	if arg.rt == nil {
		return
	}
	if arg.rt.parker != nil {
		arg.rt.parker.FreeMem()
	}
	arg.rt.retryError = nil
	arg.cleanCachedBatch(proc)
	arg.rt.FreeMergeTypeOperator(pipelineFailed)
}

func (arg *Argument) cleanCachedBatch(_ *process.Process) {
	// no need to clean the batches, only drop the references
	// for _, bat := range arg.rt.cachedBatches {
	// 	bat.Clean(proc.Mp())
	// }
	arg.rt.cachedBatches = nil
}

func (arg *Argument) getBatch(
	_ *process.Process,
	anal process.Analyze,
	isFirst bool) (*batch.Batch, error) {
	fn := arg.rt.batchFetchFunc
	if fn == nil {
		fn = arg.rt.ReceiveFromAllRegs
	}

	bat, end, err := fn(anal)
	if err != nil {
		return nil, err
	}
	if end {
		return nil, nil
	}
	anal.Input(bat, isFirst)
	return bat, nil
}

func getRowsFilter(
	tableID uint64,
	partitionTables []uint64) RowsFilter {
	return func(
		row int,
		filterCols []int32) bool {
		return partitionTables[filterCols[row]] == tableID
	}
}
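
// Worked example: with partitionTables = [2001, 2002] and filterCols =
// [1, 0, 1], the filter built for tableID 2002 keeps rows 0 and 2, while the
// filter built for tableID 2001 keeps only row 1.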

// hasNewVersionInRange checks the range [from, to]:
//  1. if there is no mvcc record newer than from, return false, meaning no change;
//  2. otherwise return true, meaning the data changed.
func hasNewVersionInRange(
	proc *process.Process,
	rel engine.Relation,
	tableID uint64,
	eng engine.Engine,
	vec *vector.Vector,
	from, to timestamp.Timestamp) (bool, error) {
	if vec == nil {
		return false, nil
	}

	if rel == nil {
		var err error
		txnOp := proc.TxnOperator
		_, _, rel, err = eng.GetRelationById(proc.Ctx, txnOp, tableID)
		if err != nil {
			if strings.Contains(err.Error(), "can not find table by id") {
				return false, nil
			}
			return false, err
		}
	}
	fromTS := types.BuildTS(from.PhysicalTime, from.LogicalTime)
	toTS := types.BuildTS(to.PhysicalTime, to.LogicalTime)
	return rel.PrimaryKeysMayBeModified(proc.Ctx, fromTS, toTS, vec)
}