github.com/whtcorpsinc/milevadb-prod@v0.0.0-20211104133533-f57f4be3b597/causetstore/milevadb-server/2pc_test.go

// Copyright 2020 WHTCORPS INC, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package einsteindb

import (
	"bytes"
	"context"
	"fmt"
	"math"
	"math/rand"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	. "github.com/whtcorpsinc/check"
	"github.com/whtcorpsinc/ekvproto/pkg/ekvrpcpb"
	"github.com/whtcorpsinc/errors"
	"github.com/whtcorpsinc/failpoint"
	"github.com/whtcorpsinc/milevadb/causetstore/einsteindb/einsteindbrpc"
	"github.com/whtcorpsinc/milevadb/causetstore/einsteindb/oracle"
	"github.com/whtcorpsinc/milevadb/causetstore/mockstore/cluster"
	"github.com/whtcorpsinc/milevadb/causetstore/mockstore/mockeinsteindb"
	"github.com/whtcorpsinc/milevadb/config"
	"github.com/whtcorpsinc/milevadb/ekv"
)

type testCommitterSuite struct {
	OneByOneSuite
	cluster     cluster.Cluster
	causetstore *einsteindbStore
}

var _ = SerialSuites(&testCommitterSuite{})

func (s *testCommitterSuite) SetUpSuite(c *C) {
	atomic.StoreUint64(&ManagedLockTTL, 3000) // 3s
	s.OneByOneSuite.SetUpSuite(c)
	atomic.StoreUint64(&CommitMaxBackoff, 1000)
}

func (s *testCommitterSuite) SetUpTest(c *C) {
	mvccStore, err := mockeinsteindb.NewMVCCLevelDB("")
	c.Assert(err, IsNil)
	cluster := mockeinsteindb.NewCluster(mvccStore)
	mockeinsteindb.BootstrapWithMultiRegions(cluster, []byte("a"), []byte("b"), []byte("c"))
	s.cluster = cluster
	client := mockeinsteindb.NewRPCClient(cluster, mvccStore)
	FIDelCli := &codecFIDelClient{mockeinsteindb.NewFIDelClient(cluster)}
	spekv := NewMockSafePointKV()
	causetstore, err := newEinsteinDBStore("mockeinsteindb-causetstore", FIDelCli, spekv, client, false, nil)
	c.Assert(err, IsNil) // check the error before touching the store
	causetstore.EnableTxnLocalLatches(1024000)

	// TODO: make it possible
	// causetstore, err := mockstore.NewMockStore(
	// 	mockstore.WithStoreType(mockstore.MockEinsteinDB),
	// 	mockstore.WithClusterInspector(func(c cluster.Cluster) {
	// 		mockstore.BootstrapWithMultiRegions(c, []byte("a"), []byte("b"), []byte("c"))
	// 		s.cluster = c
	// 	}),
	// 	mockstore.WithFIDelClientHijacker(func(c fidel.Client) fidel.Client {
	// 		return &codecFIDelClient{c}
	// 	}),
	// 	mockstore.WithTxnLocalLatches(1024000),
	// )
	// c.Assert(err, IsNil)

	s.causetstore = causetstore
}

func (s *testCommitterSuite) TearDownSuite(c *C) {
	atomic.StoreUint64(&CommitMaxBackoff, 20000)
	s.causetstore.Close()
	s.OneByOneSuite.TearDownSuite(c)
}

func (s *testCommitterSuite) begin(c *C) *einsteindbTxn {
	txn, err := s.causetstore.Begin()
	c.Assert(err, IsNil)
	return txn.(*einsteindbTxn)
}

func (s *testCommitterSuite) checkValues(c *C, m map[string]string) {
	txn := s.begin(c)
	for k, v := range m {
		val, err := txn.Get(context.TODO(), []byte(k))
		c.Assert(err, IsNil)
		c.Assert(string(val), Equals, v)
	}
}

func (s *testCommitterSuite) mustCommit(c *C, m map[string]string) {
	txn := s.begin(c)
	for k, v := range m {
		err := txn.Set([]byte(k), []byte(v))
		c.Assert(err, IsNil)
	}
	err := txn.Commit(context.Background())
	c.Assert(err, IsNil)

	s.checkValues(c, m)
}

func randKV(keyLen, valLen int) (string, string) {
	const letters = "abc"
	k, v := make([]byte, keyLen), make([]byte, valLen)
	for i := range k {
		k[i] = letters[rand.Intn(len(letters))]
	}
	for i := range v {
		v[i] = letters[rand.Intn(len(letters))]
	}
	return string(k), string(v)
}

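// TestDeleteYourWritesTTL checks that the TTL manager is still started by
// prewrite when a transaction deletes keys it wrote earlier, so the remaining
// mutations keep a live, refreshed primary lock.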
func (s *testCommitterSuite) TestDeleteYourWritesTTL(c *C) {
	conf := *config.GetGlobalConfig()
	oldConf := conf
	defer config.StoreGlobalConfig(&oldConf)
	conf.EinsteinDBClient.TTLRefreshedTxnSize = 0
	config.StoreGlobalConfig(&conf)
	bo := NewBackofferWithVars(context.Background(), getMaxBackoff, nil)

	{
		txn := s.begin(c)
		err := txn.GetMemBuffer().SetWithFlags(ekv.Key("bb"), []byte{0}, ekv.SetPresumeKeyNotExists)
		c.Assert(err, IsNil)
		err = txn.Set(ekv.Key("ba"), []byte{1})
		c.Assert(err, IsNil)
		err = txn.Delete(ekv.Key("bb"))
		c.Assert(err, IsNil)
		committer, err := newTwoPhaseCommitterWithInit(txn, 0)
		c.Assert(err, IsNil)
		err = committer.prewriteMutations(bo, committer.mutations)
		c.Assert(err, IsNil)
		state := atomic.LoadUint32((*uint32)(&committer.ttlManager.state))
		c.Check(state, Equals, uint32(stateRunning))
	}

	{
		txn := s.begin(c)
		err := txn.GetMemBuffer().SetWithFlags(ekv.Key("dd"), []byte{0}, ekv.SetPresumeKeyNotExists)
		c.Assert(err, IsNil)
		err = txn.Set(ekv.Key("de"), []byte{1})
		c.Assert(err, IsNil)
		err = txn.Delete(ekv.Key("dd"))
		c.Assert(err, IsNil)
		committer, err := newTwoPhaseCommitterWithInit(txn, 0)
		c.Assert(err, IsNil)
		err = committer.prewriteMutations(bo, committer.mutations)
		c.Assert(err, IsNil)
		state := atomic.LoadUint32((*uint32)(&committer.ttlManager.state))
		c.Check(state, Equals, uint32(stateRunning))
	}
}

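// TestCommitRollback checks that a transaction which hits a write conflict
// fails to commit and does not overwrite the conflicting writer's values.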
func (s *testCommitterSuite) TestCommitRollback(c *C) {
	s.mustCommit(c, map[string]string{
		"a": "a",
		"b": "b",
		"c": "c",
	})

	txn := s.begin(c)
	txn.Set([]byte("a"), []byte("a1"))
	txn.Set([]byte("b"), []byte("b1"))
	txn.Set([]byte("c"), []byte("c1"))

	s.mustCommit(c, map[string]string{
		"c": "c2",
	})

	err := txn.Commit(context.Background())
	c.Assert(err, NotNil)

	s.checkValues(c, map[string]string{
		"a": "a",
		"b": "b",
		"c": "c2",
	})
}

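// TestPrewriteRollback checks that prewritten locks do not block reads at an
// earlier snapshot, that prewrite can be retried after a conflict, and that
// committing the primary key makes the secondary key's value visible.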
func (s *testCommitterSuite) TestPrewriteRollback(c *C) {
	s.mustCommit(c, map[string]string{
		"a": "a0",
		"b": "b0",
	})
	ctx := context.Background()
	txn1 := s.begin(c)
	err := txn1.Set([]byte("a"), []byte("a1"))
	c.Assert(err, IsNil)
	err = txn1.Set([]byte("b"), []byte("b1"))
	c.Assert(err, IsNil)
	committer, err := newTwoPhaseCommitterWithInit(txn1, 0)
	c.Assert(err, IsNil)
	err = committer.prewriteMutations(NewBackofferWithVars(ctx, PrewriteMaxBackoff, nil), committer.mutations)
	c.Assert(err, IsNil)

	txn2 := s.begin(c)
	v, err := txn2.Get(context.TODO(), []byte("a"))
	c.Assert(err, IsNil)
	c.Assert(v, BytesEquals, []byte("a0"))

	err = committer.prewriteMutations(NewBackofferWithVars(ctx, PrewriteMaxBackoff, nil), committer.mutations)
	if err != nil {
		// Retry.
		txn1 = s.begin(c)
		err = txn1.Set([]byte("a"), []byte("a1"))
		c.Assert(err, IsNil)
		err = txn1.Set([]byte("b"), []byte("b1"))
		c.Assert(err, IsNil)
		committer, err = newTwoPhaseCommitterWithInit(txn1, 0)
		c.Assert(err, IsNil)
		err = committer.prewriteMutations(NewBackofferWithVars(ctx, PrewriteMaxBackoff, nil), committer.mutations)
		c.Assert(err, IsNil)
	}
	committer.commitTS, err = s.causetstore.oracle.GetTimestamp(ctx)
	c.Assert(err, IsNil)
	err = committer.commitMutations(NewBackofferWithVars(ctx, int(atomic.LoadUint64(&CommitMaxBackoff)), nil), CommitterMutations{keys: [][]byte{[]byte("a")}})
	c.Assert(err, IsNil)

	txn3 := s.begin(c)
	v, err = txn3.Get(context.TODO(), []byte("b"))
	c.Assert(err, IsNil)
	c.Assert(v, BytesEquals, []byte("b1"))
}

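// TestContextCancel checks that prewrite returns context.Canceled when its
// backoffer's context has been canceled.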
func (s *testCommitterSuite) TestContextCancel(c *C) {
	txn1 := s.begin(c)
	err := txn1.Set([]byte("a"), []byte("a1"))
	c.Assert(err, IsNil)
	err = txn1.Set([]byte("b"), []byte("b1"))
	c.Assert(err, IsNil)
	committer, err := newTwoPhaseCommitterWithInit(txn1, 0)
	c.Assert(err, IsNil)

	bo := NewBackofferWithVars(context.Background(), PrewriteMaxBackoff, nil)
	backoffer, cancel := bo.Fork()
	cancel() // cancel the context
	err = committer.prewriteMutations(backoffer, committer.mutations)
	c.Assert(errors.Cause(err), Equals, context.Canceled)
}

func (s *testCommitterSuite) TestContextCancel2(c *C) {
	txn := s.begin(c)
	err := txn.Set([]byte("a"), []byte("a"))
	c.Assert(err, IsNil)
	err = txn.Set([]byte("b"), []byte("b"))
	c.Assert(err, IsNil)
	ctx, cancel := context.WithCancel(context.Background())
	err = txn.Commit(ctx)
	c.Assert(err, IsNil)
	cancel()
	// Secondary keys should not be canceled.
	time.Sleep(time.Millisecond * 20)
	c.Assert(s.isKeyLocked(c, []byte("b")), IsFalse)
}

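// TestContextCancelRetryable checks that a commit which hits a retryable
// region error on one key and a lock on another eventually surfaces a write
// conflict error instead of hanging.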
func (s *testCommitterSuite) TestContextCancelRetryable(c *C) {
	txn1, txn2, txn3 := s.begin(c), s.begin(c), s.begin(c)
	// txn1 locks "b"
	err := txn1.Set([]byte("b"), []byte("b1"))
	c.Assert(err, IsNil)
	committer, err := newTwoPhaseCommitterWithInit(txn1, 0)
	c.Assert(err, IsNil)
	err = committer.prewriteMutations(NewBackofferWithVars(context.Background(), PrewriteMaxBackoff, nil), committer.mutations)
	c.Assert(err, IsNil)
	// txn3 writes "c"
	err = txn3.Set([]byte("c"), []byte("c3"))
	c.Assert(err, IsNil)
	err = txn3.Commit(context.Background())
	c.Assert(err, IsNil)
	// txn2 writes "a"(PK), "b", "c" on different regions.
	// "c" will return a retryable error.
	// "b" will get a Locked error first, then the context must be canceled after backing off for the lock.
	err = txn2.Set([]byte("a"), []byte("a2"))
	c.Assert(err, IsNil)
	err = txn2.Set([]byte("b"), []byte("b2"))
	c.Assert(err, IsNil)
	err = txn2.Set([]byte("c"), []byte("c2"))
	c.Assert(err, IsNil)
	err = txn2.Commit(context.Background())
	c.Assert(err, NotNil)
	c.Assert(ekv.ErrWriteConflictInMilevaDB.Equal(err), IsTrue, Commentf("err: %s", err))
}

func (s *testCommitterSuite) mustGetRegionID(c *C, key []byte) uint64 {
	loc, err := s.causetstore.regionCache.LocateKey(NewBackofferWithVars(context.Background(), getMaxBackoff, nil), key)
	c.Assert(err, IsNil)
	return loc.Region.id
}

func (s *testCommitterSuite) isKeyLocked(c *C, key []byte) bool {
	ver, err := s.causetstore.CurrentVersion()
	c.Assert(err, IsNil)
	bo := NewBackofferWithVars(context.Background(), getMaxBackoff, nil)
	req := einsteindbrpc.NewRequest(einsteindbrpc.CmdGet, &ekvrpcpb.GetRequest{
		Key:     key,
		Version: ver.Ver,
	})
	loc, err := s.causetstore.regionCache.LocateKey(bo, key)
	c.Assert(err, IsNil)
	resp, err := s.causetstore.SendReq(bo, req, loc.Region, readTimeoutShort)
	c.Assert(err, IsNil)
	c.Assert(resp.Resp, NotNil)
	keyErr := (resp.Resp.(*ekvrpcpb.GetResponse)).GetError()
	return keyErr.GetLocked() != nil
}

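// TestPrewriteCancel checks that when one key's prewrite fails, the prewrites
// already sent for the other keys are rolled back within a reasonable time.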
func (s *testCommitterSuite) TestPrewriteCancel(c *C) {
	// Setup region delays for key "b" and "c".
	delays := map[uint64]time.Duration{
		s.mustGetRegionID(c, []byte("b")): time.Millisecond * 10,
		s.mustGetRegionID(c, []byte("c")): time.Millisecond * 20,
	}
	s.causetstore.client = &slowClient{
		Client:       s.causetstore.client,
		regionDelays: delays,
	}

	txn1, txn2 := s.begin(c), s.begin(c)
	// txn2 writes "b"
	err := txn2.Set([]byte("b"), []byte("b2"))
	c.Assert(err, IsNil)
	err = txn2.Commit(context.Background())
	c.Assert(err, IsNil)
	// txn1 writes "a"(PK), "b", "c" on different regions.
	// "b" will return an error and cancel commit.
	err = txn1.Set([]byte("a"), []byte("a1"))
	c.Assert(err, IsNil)
	err = txn1.Set([]byte("b"), []byte("b1"))
	c.Assert(err, IsNil)
	err = txn1.Set([]byte("c"), []byte("c1"))
	c.Assert(err, IsNil)
	err = txn1.Commit(context.Background())
	c.Assert(err, NotNil)
	// "c" should be cleaned up in reasonable time.
	for i := 0; i < 50; i++ {
		if !s.isKeyLocked(c, []byte("c")) {
			return
		}
		time.Sleep(time.Millisecond * 10)
	}
	c.Fail()
}

// slowClient wraps rpcClient and makes some regions respond with delay.
type slowClient struct {
	Client
	regionDelays map[uint64]time.Duration
}

// SendRequest implements the Client interface (matching mockClient below) so
// the configured per-region delays actually apply on interface dispatch.
func (c *slowClient) SendRequest(ctx context.Context, addr string, req *einsteindbrpc.Request, timeout time.Duration) (*einsteindbrpc.Response, error) {
	for id, delay := range c.regionDelays {
		reqCtx := &req.Context
		if reqCtx.GetRegionId() == id {
			time.Sleep(delay)
		}
	}
	return c.Client.SendRequest(ctx, addr, req, timeout)
}

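// TestIllegalTso checks that a commit with an out-of-range startTS is rejected.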
func (s *testCommitterSuite) TestIllegalTso(c *C) {
	txn := s.begin(c)
	data := map[string]string{
		"name": "aa",
		"age":  "12",
	}
	for k, v := range data {
		err := txn.Set([]byte(k), []byte(v))
		c.Assert(err, IsNil)
	}
	// make start ts bigger.
	txn.startTS = uint64(math.MaxUint64)
	err := txn.Commit(context.Background())
	c.Assert(err, NotNil)
	errMsgMustContain(c, err, "invalid txnStartTS")
}

func errMsgMustContain(c *C, err error, msg string) {
	c.Assert(strings.Contains(err.Error(), msg), IsTrue)
}

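// newTwoPhaseCommitterWithInit creates a committer for txn and initializes its
// keys and mutations from the transaction's memory buffer.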
func newTwoPhaseCommitterWithInit(txn *einsteindbTxn, connID uint64) (*twoPhaseCommitter, error) {
	c, err := newTwoPhaseCommitter(txn, connID)
	if err != nil {
		return nil, errors.Trace(err)
	}
	if err = c.initKeysAndMutations(); err != nil {
		return nil, errors.Trace(err)
	}
	return c, nil
}

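// TestCommitBeforePrewrite checks that prewrite fails once the transaction has
// already been rolled back by a cleanup.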
func (s *testCommitterSuite) TestCommitBeforePrewrite(c *C) {
	txn := s.begin(c)
	err := txn.Set([]byte("a"), []byte("a1"))
	c.Assert(err, IsNil)
	committer, err := newTwoPhaseCommitterWithInit(txn, 0)
	c.Assert(err, IsNil)
	ctx := context.Background()
	err = committer.cleanupMutations(NewBackofferWithVars(ctx, cleanupMaxBackoff, nil), committer.mutations)
	c.Assert(err, IsNil)
	err = committer.prewriteMutations(NewBackofferWithVars(ctx, PrewriteMaxBackoff, nil), committer.mutations)
	c.Assert(err, NotNil)
	errMsgMustContain(c, err, "already rolled back")
}

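// TestPrewritePrimaryKeyFailed checks that when the primary key's prewrite
// fails, the transaction's other writes are rolled back and a repeated cleanup
// is harmless.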
func (s *testCommitterSuite) TestPrewritePrimaryKeyFailed(c *C) {
	// commit (a,a1)
	txn1 := s.begin(c)
	err := txn1.Set([]byte("a"), []byte("a1"))
	c.Assert(err, IsNil)
	err = txn1.Commit(context.Background())
	c.Assert(err, IsNil)

	// check a
	txn := s.begin(c)
	v, err := txn.Get(context.TODO(), []byte("a"))
	c.Assert(err, IsNil)
	c.Assert(v, BytesEquals, []byte("a1"))

	// set txn2's startTs before txn1's
	txn2 := s.begin(c)
	txn2.startTS = txn1.startTS - 1
	err = txn2.Set([]byte("a"), []byte("a2"))
	c.Assert(err, IsNil)
	err = txn2.Set([]byte("b"), []byte("b2"))
	c.Assert(err, IsNil)
	// prewrite: the primary key a fails, b succeeds
	err = txn2.Commit(context.Background())
	c.Assert(err, NotNil)

	// txn2 failed with a rollback record for key a.
	txn = s.begin(c)
	v, err = txn.Get(context.TODO(), []byte("a"))
	c.Assert(err, IsNil)
	c.Assert(v, BytesEquals, []byte("a1"))
	_, err = txn.Get(context.TODO(), []byte("b"))
	errMsgMustContain(c, err, "key not exist")

	// clean up again; it shouldn't fail when a rollback record already exists.
	ctx := context.Background()
	committer, err := newTwoPhaseCommitterWithInit(txn2, 0)
	c.Assert(err, IsNil)
	err = committer.cleanupMutations(NewBackofferWithVars(ctx, cleanupMaxBackoff, nil), committer.mutations)
	c.Assert(err, IsNil)

	// check the data after rolling back twice.
	txn = s.begin(c)
	v, err = txn.Get(context.TODO(), []byte("a"))
	c.Assert(err, IsNil)
	c.Assert(v, BytesEquals, []byte("a1"))

	// update the data in a new txn; it should succeed.
	err = txn.Set([]byte("a"), []byte("a3"))
	c.Assert(err, IsNil)
	err = txn.Commit(context.Background())
	c.Assert(err, IsNil)
	// check value
	txn = s.begin(c)
	v, err = txn.Get(context.TODO(), []byte("a"))
	c.Assert(err, IsNil)
	c.Assert(v, BytesEquals, []byte("a3"))
}

func (s *testCommitterSuite) TestWrittenKeysOnConflict(c *C) {
	// This test checks that when there is a write conflict, the written keys
	// are collected, so we can use them to clean up keys.
	region, _ := s.cluster.GetRegionByKey([]byte("x"))
	newRegionID := s.cluster.AllocID()
	newPeerID := s.cluster.AllocID()
	s.cluster.Split(region.Id, newRegionID, []byte("y"), []uint64{newPeerID}, newPeerID)
	var totalTime time.Duration
	for i := 0; i < 10; i++ {
		txn1 := s.begin(c)
		txn2 := s.begin(c)
		txn2.Set([]byte("x1"), []byte("1"))
		committer2, err := newTwoPhaseCommitterWithInit(txn2, 2)
		c.Assert(err, IsNil)
		err = committer2.execute(context.Background())
		c.Assert(err, IsNil)
		txn1.Set([]byte("x1"), []byte("1"))
		txn1.Set([]byte("y1"), []byte("2"))
		committer1, err := newTwoPhaseCommitterWithInit(txn1, 2)
		c.Assert(err, IsNil)
		err = committer1.execute(context.Background())
		c.Assert(err, NotNil)
		committer1.cleanWg.Wait()
		txn3 := s.begin(c)
		start := time.Now()
		txn3.Get(context.TODO(), []byte("y1"))
		totalTime += time.Since(start)
		txn3.Commit(context.Background())
	}
	c.Assert(totalTime, Less, time.Millisecond*200)
}

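// TestPrewriteTxnSize checks that the TxnSize field of the prewritten locks
// reflects the number of keys in each region's batch.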
func (s *testCommitterSuite) TestPrewriteTxnSize(c *C) {
	// Prepare two regions first: (, 100) and [100, )
	region, _ := s.cluster.GetRegionByKey([]byte{50})
	newRegionID := s.cluster.AllocID()
	newPeerID := s.cluster.AllocID()
	s.cluster.Split(region.Id, newRegionID, []byte{100}, []uint64{newPeerID}, newPeerID)

	txn := s.begin(c)
	var val [1024]byte
	for i := byte(50); i < 120; i++ {
		err := txn.Set([]byte{i}, val[:])
		c.Assert(err, IsNil)
	}

	committer, err := newTwoPhaseCommitterWithInit(txn, 1)
	c.Assert(err, IsNil)

	ctx := context.Background()
	err = committer.prewriteMutations(NewBackofferWithVars(ctx, PrewriteMaxBackoff, nil), committer.mutations)
	c.Assert(err, IsNil)

	// Check the written locks in the first region (50 keys)
	for i := byte(50); i < 100; i++ {
		dagger := s.getLockInfo(c, []byte{i})
		c.Assert(int(dagger.TxnSize), Equals, 50)
	}

	// Check the written locks in the second region (20 keys)
	for i := byte(100); i < 120; i++ {
		dagger := s.getLockInfo(c, []byte{i})
		c.Assert(int(dagger.TxnSize), Equals, 20)
	}
}

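// TestRejectCommitTS checks that a commitTS below the lock's minCommitTS is
// rejected and the commit retries with a larger timestamp.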
func (s *testCommitterSuite) TestRejectCommitTS(c *C) {
	txn := s.begin(c)
	c.Assert(txn.Set([]byte("x"), []byte("v")), IsNil)

	committer, err := newTwoPhaseCommitterWithInit(txn, 1)
	c.Assert(err, IsNil)
	bo := NewBackofferWithVars(context.Background(), getMaxBackoff, nil)
	loc, err := s.causetstore.regionCache.LocateKey(bo, []byte("x"))
	c.Assert(err, IsNil)
	mutations := []*ekvrpcpb.Mutation{
		{
			Op:    committer.mutations.ops[0],
			Key:   committer.mutations.keys[0],
			Value: committer.mutations.values[0],
		},
	}
	prewrite := &ekvrpcpb.PrewriteRequest{
		Mutations:    mutations,
		PrimaryLock:  committer.primary(),
		StartVersion: committer.startTS,
		LockTtl:      committer.lockTTL,
		MinCommitTs:  committer.startTS + 100, // Set minCommitTS
	}
	req := einsteindbrpc.NewRequest(einsteindbrpc.CmdPrewrite, prewrite)
	_, err = s.causetstore.SendReq(bo, req, loc.Region, readTimeoutShort)
	c.Assert(err, IsNil)

	// Make commitTS less than minCommitTS.
	committer.commitTS = committer.startTS + 1
	// Ensure that the new commit ts is greater than minCommitTS when retrying.
	time.Sleep(3 * time.Millisecond)
	err = committer.commitMutations(bo, committer.mutations)
	c.Assert(err, IsNil)

	// Use startTS+2 to read the data and get nothing.
	// Use math.MaxUint64 to read the data and succeed.
	// That means the final commitTS > startTS+2, so it's not the one we provided.
	// This covers the retry-commitTS logic.
	txn1, err := s.causetstore.BeginWithStartTS(committer.startTS + 2)
	c.Assert(err, IsNil)
	_, err = txn1.Get(bo.ctx, []byte("x"))
	c.Assert(ekv.IsErrNotFound(err), IsTrue)

	txn2, err := s.causetstore.BeginWithStartTS(math.MaxUint64)
	c.Assert(err, IsNil)
	val, err := txn2.Get(bo.ctx, []byte("x"))
	c.Assert(err, IsNil)
	c.Assert(bytes.Equal(val, []byte("v")), IsTrue)
}

func (s *testCommitterSuite) TestPessimisticPrewriteRequest(c *C) {
	// This test checks that the isPessimisticLock field is set in the request even when none of the keys are pessimistic locks.
	txn := s.begin(c)
	txn.SetOption(ekv.Pessimistic, true)
	err := txn.Set([]byte("t1"), []byte("v1"))
	c.Assert(err, IsNil)
	committer, err := newTwoPhaseCommitterWithInit(txn, 0)
	c.Assert(err, IsNil)
	committer.forUFIDelateTS = 100
	var batch batchMutations
	batch.mutations = committer.mutations.subRange(0, 1)
	batch.region = RegionVerID{1, 1, 1}
	req := committer.buildPrewriteRequest(batch, 1)
	c.Assert(len(req.Prewrite().IsPessimisticLock), Greater, 0)
	c.Assert(req.Prewrite().ForUFIDelateTs, Equals, uint64(100))
}

func (s *testCommitterSuite) TestUnsetPrimaryKey(c *C) {
	// This test checks that a failed LockKeys call leaves the primary key unset,
	// and that the transaction can still delete the key and commit other writes.
	key := ekv.Key("key")
	txn := s.begin(c)
	c.Assert(txn.Set(key, key), IsNil)
	c.Assert(txn.Commit(context.Background()), IsNil)

	txn = s.begin(c)
	txn.SetOption(ekv.Pessimistic, true)
	_, _ = txn.us.Get(context.TODO(), key)
	c.Assert(txn.GetMemBuffer().SetWithFlags(key, key, ekv.SetPresumeKeyNotExists), IsNil)
	lockCtx := &ekv.LockCtx{ForUFIDelateTS: txn.startTS, WaitStartTime: time.Now()}
	err := txn.LockKeys(context.Background(), lockCtx, key)
	c.Assert(err, NotNil)
	c.Assert(txn.Delete(key), IsNil)
	key2 := ekv.Key("key2")
	c.Assert(txn.Set(key2, key2), IsNil)
	err = txn.Commit(context.Background())
	c.Assert(err, IsNil)
}

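// TestPessimisticLockedKeysDedup checks that locking the same keys twice does
// not duplicate them in the collected locked-key set.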
func (s *testCommitterSuite) TestPessimisticLockedKeysDedup(c *C) {
	txn := s.begin(c)
	txn.SetOption(ekv.Pessimistic, true)
	lockCtx := &ekv.LockCtx{ForUFIDelateTS: 100, WaitStartTime: time.Now()}
	err := txn.LockKeys(context.Background(), lockCtx, ekv.Key("abc"), ekv.Key("def"))
	c.Assert(err, IsNil)
	lockCtx = &ekv.LockCtx{ForUFIDelateTS: 100, WaitStartTime: time.Now()}
	err = txn.LockKeys(context.Background(), lockCtx, ekv.Key("abc"), ekv.Key("def"))
	c.Assert(err, IsNil)
	c.Assert(txn.collectLockedKeys(), HasLen, 2)
}

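// TestPessimisticTTL checks that pessimistic locks carry a TTL that accounts
// for elapsed time, and that the primary lock's TTL keeps being refreshed by
// the ttlManager while the transaction is alive.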
func (s *testCommitterSuite) TestPessimisticTTL(c *C) {
	key := ekv.Key("key")
	txn := s.begin(c)
	txn.SetOption(ekv.Pessimistic, true)
	time.Sleep(time.Millisecond * 100)
	lockCtx := &ekv.LockCtx{ForUFIDelateTS: txn.startTS, WaitStartTime: time.Now()}
	err := txn.LockKeys(context.Background(), lockCtx, key)
	c.Assert(err, IsNil)
	time.Sleep(time.Millisecond * 100)
	key2 := ekv.Key("key2")
	lockCtx = &ekv.LockCtx{ForUFIDelateTS: txn.startTS, WaitStartTime: time.Now()}
	err = txn.LockKeys(context.Background(), lockCtx, key2)
	c.Assert(err, IsNil)
	lockInfo := s.getLockInfo(c, key)
	msBeforeLockExpired := s.causetstore.GetOracle().UntilExpired(txn.StartTS(), lockInfo.LockTtl)
	c.Assert(msBeforeLockExpired, GreaterEqual, int64(100))

	lr := newLockResolver(s.causetstore)
	bo := NewBackofferWithVars(context.Background(), getMaxBackoff, nil)
	status, err := lr.getTxnStatus(bo, txn.startTS, key2, 0, txn.startTS, true)
	c.Assert(err, IsNil)
	c.Assert(status.ttl, GreaterEqual, lockInfo.LockTtl)

	// Check that the primary lock's TTL increases automatically while the pessimistic txn is ongoing.
	for i := 0; i < 50; i++ {
		lockInfoNew := s.getLockInfo(c, key)
		if lockInfoNew.LockTtl > lockInfo.LockTtl {
			currentTS, err := lr.causetstore.GetOracle().GetTimestamp(bo.ctx)
			c.Assert(err, IsNil)
			// Check that the TTL is updated to a reasonable range.
			expire := oracle.ExtractPhysical(txn.startTS) + int64(lockInfoNew.LockTtl)
			now := oracle.ExtractPhysical(currentTS)
			c.Assert(expire > now, IsTrue)
			c.Assert(uint64(expire-now) <= atomic.LoadUint64(&ManagedLockTTL), IsTrue)
			return
		}
		time.Sleep(100 * time.Millisecond)
	}
	c.Assert(false, IsTrue, Commentf("update pessimistic ttl fail"))
}

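// TestPessimisticLockReturnValues checks that LockKeys with ReturnValues set
// returns the current value of each locked key.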
func (s *testCommitterSuite) TestPessimisticLockReturnValues(c *C) {
	key := ekv.Key("key")
	key2 := ekv.Key("key2")
	txn := s.begin(c)
	c.Assert(txn.Set(key, key), IsNil)
	c.Assert(txn.Set(key2, key2), IsNil)
	c.Assert(txn.Commit(context.Background()), IsNil)
	txn = s.begin(c)
	txn.SetOption(ekv.Pessimistic, true)
	lockCtx := &ekv.LockCtx{ForUFIDelateTS: txn.startTS, WaitStartTime: time.Now()}
	lockCtx.ReturnValues = true
	lockCtx.Values = map[string]ekv.ReturnedValue{}
	c.Assert(txn.LockKeys(context.Background(), lockCtx, key, key2), IsNil)
	c.Assert(lockCtx.Values, HasLen, 2)
	c.Assert(lockCtx.Values[string(key)].Value, BytesEquals, []byte(key))
	c.Assert(lockCtx.Values[string(key2)].Value, BytesEquals, []byte(key2))
}

// TestElapsedTTL tests that the elapsed time is correct even if the ts physical time is greater than the local time.
func (s *testCommitterSuite) TestElapsedTTL(c *C) {
	key := ekv.Key("key")
	txn := s.begin(c)
	txn.startTS = oracle.ComposeTS(oracle.GetPhysical(time.Now().Add(time.Second*10)), 1)
	txn.SetOption(ekv.Pessimistic, true)
	time.Sleep(time.Millisecond * 100)
	lockCtx := &ekv.LockCtx{
		ForUFIDelateTS: oracle.ComposeTS(oracle.ExtractPhysical(txn.startTS)+100, 1),
		WaitStartTime:  time.Now(),
	}
	err := txn.LockKeys(context.Background(), lockCtx, key)
	c.Assert(err, IsNil)
	lockInfo := s.getLockInfo(c, key)
	c.Assert(lockInfo.LockTtl-atomic.LoadUint64(&ManagedLockTTL), GreaterEqual, uint64(100))
	c.Assert(lockInfo.LockTtl-atomic.LoadUint64(&ManagedLockTTL), Less, uint64(150))
}

func (s *testCommitterSuite) TestDeleteYourWriteCauseGhostPrimary(c *C) {
	s.cluster.SplitKeys(ekv.Key("d"), ekv.Key("a"), 4)
	k1 := ekv.Key("a") // insert-but-deleted key at the first pos in txn1
	k2 := ekv.Key("b") // insert key at the second pos in txn1
	k3 := ekv.Key("c") // insert key in txn1, will be conflict-read by txn2

	// insert k1, k2, k3 and delete k1
	txn1 := s.begin(c)
	txn1.DelOption(ekv.Pessimistic)
	txn1.causetstore.txnLatches = nil
	txn1.Get(context.Background(), k1)
	txn1.GetMemBuffer().SetWithFlags(k1, []byte{0}, ekv.SetPresumeKeyNotExists)
	txn1.Set(k2, []byte{1})
	txn1.Set(k3, []byte{2})
	txn1.Delete(k1)
	committer1, err := newTwoPhaseCommitter(txn1, 0)
	c.Assert(err, IsNil)
	// set up the test knobs in txn1's committer
	committer1.testingKnobs.acAfterCommitPrimary = make(chan struct{})
	committer1.testingKnobs.bkAfterCommitPrimary = make(chan struct{})
	txn1.committer = committer1
	var txn1Done sync.WaitGroup
	txn1Done.Add(1)
	go func() {
		err1 := txn1.Commit(context.Background())
		c.Assert(err1, IsNil)
		txn1Done.Done()
	}()
	// resume after the primary key has been committed
	<-txn1.committer.testingKnobs.acAfterCommitPrimary

	// start txn2 to read k3 (prewrite succeeded and the primary should be committed)
	txn2 := s.begin(c)
	txn2.DelOption(ekv.Pessimistic)
	txn2.causetstore.txnLatches = nil
	v, err := txn2.Get(context.Background(), k3)
	c.Assert(err, IsNil) // should resolve the lock and read txn1's k3 result instead of rolling it back
	c.Assert(v[0], Equals, byte(2))
	txn1.committer.testingKnobs.bkAfterCommitPrimary <- struct{}{}
	txn1Done.Wait()
}

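// TestDeleteAllYourWrites checks that a transaction whose mutations all cancel
// out (every inserted key is deleted again) can still commit.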
func (s *testCommitterSuite) TestDeleteAllYourWrites(c *C) {
	s.cluster.SplitKeys(ekv.Key("d"), ekv.Key("a"), 4)
	k1 := ekv.Key("a")
	k2 := ekv.Key("b")
	k3 := ekv.Key("c")

	// insert k1, k2, k3 and delete k1, k2, k3
	txn1 := s.begin(c)
	txn1.DelOption(ekv.Pessimistic)
	txn1.causetstore.txnLatches = nil
	txn1.GetMemBuffer().SetWithFlags(k1, []byte{0}, ekv.SetPresumeKeyNotExists)
	txn1.Delete(k1)
	txn1.GetMemBuffer().SetWithFlags(k2, []byte{1}, ekv.SetPresumeKeyNotExists)
	txn1.Delete(k2)
	txn1.GetMemBuffer().SetWithFlags(k3, []byte{2}, ekv.SetPresumeKeyNotExists)
	txn1.Delete(k3)
	err1 := txn1.Commit(context.Background())
	c.Assert(err1, IsNil)
}

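// TestDeleteAllYourWritesWithSFU checks that when every written key is deleted
// and only select-for-update locks remain, the commit still succeeds and the
// leftover locks observed by a conflicting transaction have k2 as primary.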
func (s *testCommitterSuite) TestDeleteAllYourWritesWithSFU(c *C) {
	s.cluster.SplitKeys(ekv.Key("d"), ekv.Key("a"), 4)
	k1 := ekv.Key("a")
	k2 := ekv.Key("b")
	k3 := ekv.Key("c")

	// insert k1 and delete it, then lock k2 and k3 with select-for-update
	txn1 := s.begin(c)
	txn1.DelOption(ekv.Pessimistic)
	txn1.causetstore.txnLatches = nil
	txn1.GetMemBuffer().SetWithFlags(k1, []byte{0}, ekv.SetPresumeKeyNotExists)
	txn1.Delete(k1)
	err := txn1.LockKeys(context.Background(), &ekv.LockCtx{}, k2, k3) // select * from t where x in (k2, k3) for update
	c.Assert(err, IsNil)

	committer1, err := newTwoPhaseCommitter(txn1, 0)
	c.Assert(err, IsNil)
	// set up the test knobs in txn1's committer
	committer1.testingKnobs.acAfterCommitPrimary = make(chan struct{})
	committer1.testingKnobs.bkAfterCommitPrimary = make(chan struct{})
	txn1.committer = committer1
	var txn1Done sync.WaitGroup
	txn1Done.Add(1)
	go func() {
		err1 := txn1.Commit(context.Background())
		c.Assert(err1, IsNil)
		txn1Done.Done()
	}()
	// resume after the primary key has been committed
	<-txn1.committer.testingKnobs.acAfterCommitPrimary
	// start txn2 to write k3
	txn2 := s.begin(c)
	txn2.DelOption(ekv.Pessimistic)
	txn2.causetstore.txnLatches = nil
	err = txn2.Set(k3, []byte{33})
	c.Assert(err, IsNil)
	var meetLocks []*Lock
	txn2.causetstore.lockResolver.testingKnobs.meetLock = func(locks []*Lock) {
		meetLocks = append(meetLocks, locks...)
	}
	err = txn2.Commit(context.Background())
	c.Assert(err, IsNil)
	txn1.committer.testingKnobs.bkAfterCommitPrimary <- struct{}{}
	txn1Done.Wait()
	c.Assert(meetLocks[0].Primary[0], Equals, k2[0])
}

// TestAcquireFalseTimeoutLock tests acquiring a key which is a secondary key of another transaction.
// The lock's own TTL has expired but the primary key is still alive due to heartbeats.
func (s *testCommitterSuite) TestAcquireFalseTimeoutLock(c *C) {
	atomic.StoreUint64(&ManagedLockTTL, 1000)       // 1s
	defer atomic.StoreUint64(&ManagedLockTTL, 3000) // restore default test value

	// k1 is the primary lock of txn1
	k1 := ekv.Key("k1")
	// k2 is a secondary lock of txn1 and a key txn2 wants to lock
	k2 := ekv.Key("k2")

	txn1 := s.begin(c)
	txn1.SetOption(ekv.Pessimistic, true)
	// lock the primary key
	lockCtx := &ekv.LockCtx{ForUFIDelateTS: txn1.startTS, WaitStartTime: time.Now()}
	err := txn1.LockKeys(context.Background(), lockCtx, k1)
	c.Assert(err, IsNil)
	// lock the secondary key
	lockCtx = &ekv.LockCtx{ForUFIDelateTS: txn1.startTS, WaitStartTime: time.Now()}
	err = txn1.LockKeys(context.Background(), lockCtx, k2)
	c.Assert(err, IsNil)

	// Heartbeats will increase the TTL of the primary key

	// wait until the secondary key exceeds its own TTL
	time.Sleep(time.Duration(atomic.LoadUint64(&ManagedLockTTL)) * time.Millisecond)
	txn2 := s.begin(c)
	txn2.SetOption(ekv.Pessimistic, true)

	// test no wait
	lockCtx = &ekv.LockCtx{ForUFIDelateTS: txn2.startTS, LockWaitTime: ekv.LockNoWait, WaitStartTime: time.Now()}
	err = txn2.LockKeys(context.Background(), lockCtx, k2)
	// cannot acquire the lock immediately, thus an error
	c.Assert(err.Error(), Equals, ErrLockAcquireFailAndNoWaitSet.Error())

	// test waiting for a limited time (200ms)
	lockCtx = &ekv.LockCtx{ForUFIDelateTS: txn2.startTS, LockWaitTime: 200, WaitStartTime: time.Now()}
	err = txn2.LockKeys(context.Background(), lockCtx, k2)
	// cannot acquire the lock in time, thus an error
	c.Assert(err.Error(), Equals, ErrLockWaitTimeout.Error())
}

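// getLockInfo returns the lock currently held on key by issuing a conflicting
// prewrite from a fresh transaction and reading the Locked error from the
// response.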
func (s *testCommitterSuite) getLockInfo(c *C, key []byte) *ekvrpcpb.LockInfo {
	txn := s.begin(c)
	err := txn.Set(key, key)
	c.Assert(err, IsNil)
	committer, err := newTwoPhaseCommitterWithInit(txn, 1)
	c.Assert(err, IsNil)
	bo := NewBackofferWithVars(context.Background(), getMaxBackoff, nil)
	loc, err := s.causetstore.regionCache.LocateKey(bo, key)
	c.Assert(err, IsNil)
	batch := batchMutations{region: loc.Region, mutations: committer.mutations.subRange(0, 1)}
	req := committer.buildPrewriteRequest(batch, 1)
	resp, err := s.causetstore.SendReq(bo, req, loc.Region, readTimeoutShort)
	c.Assert(err, IsNil)
	c.Assert(resp.Resp, NotNil)
	keyErrs := (resp.Resp.(*ekvrpcpb.PrewriteResponse)).Errors
	c.Assert(keyErrs, HasLen, 1)
	locked := keyErrs[0].Locked
	c.Assert(locked, NotNil)
	return locked
}

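// TestPkNotFound checks lock resolution when the primary lock has disappeared,
// e.g. after a server crash: left-behind secondary locks must still be
// resolvable and must not dead-loop waiters.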
func (s *testCommitterSuite) TestPkNotFound(c *C) {
	atomic.StoreUint64(&ManagedLockTTL, 100)        // 100ms
	defer atomic.StoreUint64(&ManagedLockTTL, 3000) // restore default value
	// k1 is the primary lock of txn1
	k1 := ekv.Key("k1")
	// k2 is a secondary lock of txn1 and a key txn2 wants to lock
	k2 := ekv.Key("k2")
	k3 := ekv.Key("k3")

	txn1 := s.begin(c)
	txn1.SetOption(ekv.Pessimistic, true)
	// lock the primary key
	lockCtx := &ekv.LockCtx{ForUFIDelateTS: txn1.startTS, WaitStartTime: time.Now()}
	err := txn1.LockKeys(context.Background(), lockCtx, k1)
	c.Assert(err, IsNil)
	// lock the secondary key
	lockCtx = &ekv.LockCtx{ForUFIDelateTS: txn1.startTS, WaitStartTime: time.Now()}
	err = txn1.LockKeys(context.Background(), lockCtx, k2)
	c.Assert(err, IsNil)

	// Stop the txn ttl manager and remove the primary key, as if the milevadb server crashed
	// and the primary key lock does not actually exist, while the secondary lock operation succeeded.
	bo := NewBackofferWithVars(context.Background(), pessimisticLockMaxBackoff, nil)
	txn1.committer.ttlManager.close()
	err = txn1.committer.pessimisticRollbackMutations(bo, CommitterMutations{keys: [][]byte{k1}})
	c.Assert(err, IsNil)

	// Txn2 tries to lock the secondary key k2; it would dead-loop if the secondary lock left by txn1 were not resolved.
	txn2 := s.begin(c)
	txn2.SetOption(ekv.Pessimistic, true)
	lockCtx = &ekv.LockCtx{ForUFIDelateTS: txn2.startTS, WaitStartTime: time.Now()}
	err = txn2.LockKeys(context.Background(), lockCtx, k2)
	c.Assert(err, IsNil)

	// Using a smaller forUFIDelateTS cannot roll back this lock; other locks will fail.
	lockKey3 := &Lock{
		Key:                k3,
		Primary:            k1,
		TxnID:              txn1.startTS,
		TTL:                ManagedLockTTL,
		TxnSize:            txnCommitBatchSize,
		LockType:           ekvrpcpb.Op_PessimisticLock,
		LockForUFIDelateTS: txn1.startTS - 1,
	}
	cleanTxns := make(map[RegionVerID]struct{})
	err = s.causetstore.lockResolver.resolvePessimisticLock(bo, lockKey3, cleanTxns)
	c.Assert(err, IsNil)

	lockCtx = &ekv.LockCtx{ForUFIDelateTS: txn1.startTS, WaitStartTime: time.Now()}
	err = txn1.LockKeys(context.Background(), lockCtx, k3)
	c.Assert(err, IsNil)
	txn3 := s.begin(c)
	txn3.SetOption(ekv.Pessimistic, true)
	lockCtx = &ekv.LockCtx{ForUFIDelateTS: txn1.startTS - 1, WaitStartTime: time.Now(), LockWaitTime: ekv.LockNoWait}
	c.Assert(failpoint.Enable("github.com/whtcorpsinc/milevadb/causetstore/einsteindb/txnNotFoundRetTTL", "return"), IsNil)
	err = txn3.LockKeys(context.Background(), lockCtx, k3)
	c.Assert(err.Error(), Equals, ErrLockAcquireFailAndNoWaitSet.Error())
	c.Assert(failpoint.Disable("github.com/whtcorpsinc/milevadb/causetstore/einsteindb/txnNotFoundRetTTL"), IsNil)
}

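// TestPessimisticLockPrimary checks that pessimistic locking acquires the
// primary key first: while the primary is blocked, secondary keys must not be
// locked, so a third transaction can take them with no-wait.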
func (s *testCommitterSuite) TestPessimisticLockPrimary(c *C) {
	// a is the primary lock of txn1
	k1 := ekv.Key("a")
	// b is a secondary lock of txn1 and a key txn2 wants to lock; b is in another region
	k2 := ekv.Key("b")

	txn1 := s.begin(c)
	txn1.SetOption(ekv.Pessimistic, true)
	// txn1 locks k1
	lockCtx := &ekv.LockCtx{ForUFIDelateTS: txn1.startTS, WaitStartTime: time.Now()}
	err := txn1.LockKeys(context.Background(), lockCtx, k1)
	c.Assert(err, IsNil)

	// txn2 wants to lock k1 and k2, but k1(pk) is blocked by txn1. pessimisticLockKeys has been
	// changed to lock the primary key first and then the secondary keys concurrently,
	// so k2 should not be locked by txn2.
	doneCh := make(chan error)
	go func() {
		txn2 := s.begin(c)
		txn2.SetOption(ekv.Pessimistic, true)
		lockCtx2 := &ekv.LockCtx{ForUFIDelateTS: txn2.startTS, WaitStartTime: time.Now(), LockWaitTime: 200}
		waitErr := txn2.LockKeys(context.Background(), lockCtx2, k1, k2)
		doneCh <- waitErr
	}()
	time.Sleep(50 * time.Millisecond)

	// txn3 should lock k2 successfully using no-wait
	txn3 := s.begin(c)
	txn3.SetOption(ekv.Pessimistic, true)
	lockCtx3 := &ekv.LockCtx{ForUFIDelateTS: txn3.startTS, WaitStartTime: time.Now(), LockWaitTime: ekv.LockNoWait}
	c.Assert(failpoint.Enable("github.com/whtcorpsinc/milevadb/causetstore/einsteindb/txnNotFoundRetTTL", "return"), IsNil)
	err = txn3.LockKeys(context.Background(), lockCtx3, k2)
	c.Assert(failpoint.Disable("github.com/whtcorpsinc/milevadb/causetstore/einsteindb/txnNotFoundRetTTL"), IsNil)
	c.Assert(err, IsNil)
	waitErr := <-doneCh
	c.Assert(ErrLockWaitTimeout.Equal(waitErr), IsTrue)
}

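// mutationsOfKeys filters the committer's mutations down to the given keys.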
func (c *twoPhaseCommitter) mutationsOfKeys(keys [][]byte) CommitterMutations {
	var res CommitterMutations
	for i := range c.mutations.keys {
		for _, key := range keys {
			if bytes.Equal(c.mutations.keys[i], key) {
				res.Push(c.mutations.ops[i], c.mutations.keys[i], c.mutations.values[i], c.mutations.isPessimisticLock[i])
				break
			}
		}
	}
	return res
}

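// TestCommitDeadLock checks that when two large transactions prewrite the same
// two keys in opposite orders, exactly one of them fails instead of both
// waiting forever on each other's TTL-managed locks.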
func (s *testCommitterSuite) TestCommitDeadLock(c *C) {
	// Split into two regions and let k1, k2 be in different regions.
	s.cluster.SplitKeys(ekv.Key("z"), ekv.Key("a"), 2)
	k1 := ekv.Key("a_deadlock_k1")
	k2 := ekv.Key("y_deadlock_k2")

	region1, _ := s.cluster.GetRegionByKey(k1)
	region2, _ := s.cluster.GetRegionByKey(k2)
	c.Assert(region1.Id != region2.Id, IsTrue)

	txn1 := s.begin(c)
	txn1.Set(k1, []byte("t1"))
	txn1.Set(k2, []byte("t1"))
	commit1, err := newTwoPhaseCommitterWithInit(txn1, 1)
	c.Assert(err, IsNil)
	commit1.primaryKey = k1
	commit1.txnSize = 1000 * 1024 * 1024
	commit1.lockTTL = txnLockTTL(txn1.startTime, commit1.txnSize)

	txn2 := s.begin(c)
	txn2.Set(k1, []byte("t2"))
	txn2.Set(k2, []byte("t2"))
	commit2, err := newTwoPhaseCommitterWithInit(txn2, 2)
	c.Assert(err, IsNil)
	commit2.primaryKey = k2
	commit2.txnSize = 1000 * 1024 * 1024
	commit2.lockTTL = txnLockTTL(txn1.startTime, commit2.txnSize)

	s.cluster.ScheduleDelay(txn2.startTS, region1.Id, 5*time.Millisecond)
	s.cluster.ScheduleDelay(txn1.startTS, region2.Id, 5*time.Millisecond)

	// Txn1 prewrites k1, k2 and txn2 prewrites k2, k1. The large-txn protocol
	// runs the ttlManager and keeps updating their TTLs, which can cause a deadlock.
	ch := make(chan error, 2)
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		ch <- commit2.execute(context.Background())
		wg.Done()
	}()
	ch <- commit1.execute(context.Background())
	wg.Wait()
	close(ch)

	res := 0
	for e := range ch {
		if e != nil {
			res++
		}
	}
	c.Assert(res, Equals, 1)
}

// TestPushPessimisticLock tests pushing forward the minCommitTS of pessimistic locks.
func (s *testCommitterSuite) TestPushPessimisticLock(c *C) {
	// k1 is the primary key.
	k1, k2 := ekv.Key("a"), ekv.Key("b")
	ctx := context.Background()

	txn1 := s.begin(c)
	txn1.SetOption(ekv.Pessimistic, true)
	lockCtx := &ekv.LockCtx{ForUFIDelateTS: txn1.startTS, WaitStartTime: time.Now()}
	err := txn1.LockKeys(context.Background(), lockCtx, k1, k2)
	c.Assert(err, IsNil)

	txn1.Set(k2, []byte("v2"))
	err = txn1.committer.initKeysAndMutations()
	c.Assert(err, IsNil)
	// Strip the prewrite of the primary key.
	txn1.committer.mutations = txn1.committer.mutations.subRange(1, 2)
	c.Assert(err, IsNil)
	err = txn1.committer.prewriteMutations(NewBackofferWithVars(ctx, PrewriteMaxBackoff, nil), txn1.committer.mutations)
	c.Assert(err, IsNil)
	// The primary lock is a pessimistic lock and the secondary lock is an optimistic lock.
	lock1 := s.getLockInfo(c, k1)
	c.Assert(lock1.LockType, Equals, ekvrpcpb.Op_PessimisticLock)
	c.Assert(lock1.PrimaryLock, BytesEquals, []byte(k1))
	lock2 := s.getLockInfo(c, k2)
	c.Assert(lock2.LockType, Equals, ekvrpcpb.Op_Put)
	c.Assert(lock2.PrimaryLock, BytesEquals, []byte(k1))

	txn2 := s.begin(c)
	start := time.Now()
	_, err = txn2.Get(ctx, k2)
	elapsed := time.Since(start)
	// The optimistic lock shouldn't block reads.
	c.Assert(elapsed, Less, 500*time.Millisecond)
	c.Assert(ekv.IsErrNotFound(err), IsTrue)

	txn1.Rollback()
	txn2.Rollback()
}

// TestResolveMixed tests mixed resolving of left-behind optimistic locks and pessimistic locks,
// using the clean-whole-region resolve path.
func (s *testCommitterSuite) TestResolveMixed(c *C) {
	atomic.StoreUint64(&ManagedLockTTL, 100)        // 100ms
	defer atomic.StoreUint64(&ManagedLockTTL, 3000) // restore default value
	ctx := context.Background()

	// pk is the primary lock of txn1
	pk := ekv.Key("pk")
	secondaryLockkeys := make([]ekv.Key, 0, bigTxnThreshold)
	for i := 0; i < bigTxnThreshold; i++ {
		optimisticLock := ekv.Key(fmt.Sprintf("optimisticLockKey%d", i))
		secondaryLockkeys = append(secondaryLockkeys, optimisticLock)
	}
	pessimisticLockKey := ekv.Key("pessimisticLockKey")

	// leave the optimistic and pessimistic locks behind with the primary lock not found
	txn1 := s.begin(c)
	txn1.SetOption(ekv.Pessimistic, true)
	// lock the primary key
	lockCtx := &ekv.LockCtx{ForUFIDelateTS: txn1.startTS, WaitStartTime: time.Now()}
	err := txn1.LockKeys(context.Background(), lockCtx, pk)
	c.Assert(err, IsNil)
	// write the optimistic keys
	for i := 0; i < bigTxnThreshold; i++ {
		txn1.Set(secondaryLockkeys[i], []byte(fmt.Sprintf("v%d", i)))
	}
	err = txn1.committer.initKeysAndMutations()
	c.Assert(err, IsNil)
	err = txn1.committer.prewriteMutations(NewBackofferWithVars(ctx, PrewriteMaxBackoff, nil), txn1.committer.mutations)
	c.Assert(err, IsNil)
	// lock the pessimistic keys
	err = txn1.LockKeys(context.Background(), lockCtx, pessimisticLockKey)
	c.Assert(err, IsNil)
	lock1 := s.getLockInfo(c, pessimisticLockKey)
	c.Assert(lock1.LockType, Equals, ekvrpcpb.Op_PessimisticLock)
	c.Assert(lock1.PrimaryLock, BytesEquals, []byte(pk))
	optimisticLockKey := secondaryLockkeys[0]
	lock2 := s.getLockInfo(c, optimisticLockKey)
	c.Assert(lock2.LockType, Equals, ekvrpcpb.Op_Put)
	c.Assert(lock2.PrimaryLock, BytesEquals, []byte(pk))

	// stop the txn ttl manager and remove the primary key, leaving the other keys behind
	bo := NewBackofferWithVars(context.Background(), pessimisticLockMaxBackoff, nil)
	txn1.committer.ttlManager.close()
	err = txn1.committer.pessimisticRollbackMutations(bo, CommitterMutations{keys: [][]byte{pk}})
	c.Assert(err, IsNil)

	// try to resolve the left-behind optimistic locks, using clean whole region
	cleanTxns := make(map[RegionVerID]struct{})
	time.Sleep(time.Duration(atomic.LoadUint64(&ManagedLockTTL)) * time.Millisecond)
	optimisticLockInfo := s.getLockInfo(c, optimisticLockKey)
	dagger := NewLock(optimisticLockInfo)
	err = s.causetstore.lockResolver.resolveLock(NewBackofferWithVars(ctx, pessimisticLockMaxBackoff, nil), dagger, TxnStatus{}, false, cleanTxns)
	c.Assert(err, IsNil)

	// txn2 tries to lock the pessimisticLockKey; the lock should have been resolved by the clean-whole-region resolve
	txn2 := s.begin(c)
	txn2.SetOption(ekv.Pessimistic, true)
	lockCtx = &ekv.LockCtx{ForUFIDelateTS: txn2.startTS, WaitStartTime: time.Now(), LockWaitTime: ekv.LockNoWait}
	err = txn2.LockKeys(context.Background(), lockCtx, pessimisticLockKey)
	c.Assert(err, IsNil)

	err = txn1.Rollback()
	c.Assert(err, IsNil)
	err = txn2.Rollback()
	c.Assert(err, IsNil)
}

// TestPrewriteSecondaryKeys tests that when async commit is enabled, each prewrite message includes an
// accurate list of secondary keys.
func (s *testCommitterSuite) TestPrewriteSecondaryKeys(c *C) {
	defer config.RestoreFunc()()
	config.UFIDelateGlobal(func(conf *config.Config) {
		conf.EinsteinDBClient.EnableAsyncCommit = true
	})

	// Prepare two regions first: (, 100) and [100, )
	region, _ := s.cluster.GetRegionByKey([]byte{50})
	newRegionID := s.cluster.AllocID()
	newPeerID := s.cluster.AllocID()
	s.cluster.Split(region.Id, newRegionID, []byte{100}, []uint64{newPeerID}, newPeerID)

	txn := s.begin(c)
	var val [1024]byte
	for i := byte(50); i < 120; i++ {
		err := txn.Set([]byte{i}, val[:])
		c.Assert(err, IsNil)
	}
	// Some duplicates.
	for i := byte(50); i < 120; i += 10 {
		err := txn.Set([]byte{i}, val[512:700])
		c.Assert(err, IsNil)
	}

	committer, err := newTwoPhaseCommitterWithInit(txn, 1)
	c.Assert(err, IsNil)

	mock := mockClient{inner: s.causetstore.client}
	s.causetstore.client = &mock
	ctx := context.Background()
	// TODO: remove this when minCommitTS is returned from the mockStore prewrite response.
	committer.minCommitTS = committer.startTS + 10
	committer.testingKnobs.noFallBack = true
	err = committer.execute(ctx)
	c.Assert(err, IsNil)
	c.Assert(mock.seenPrimaryReq > 0, IsTrue)
	c.Assert(mock.seenSecondaryReq > 0, IsTrue)
}

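// TestAsyncCommit checks that a transaction can commit through the async
// commit path and that its values become visible afterwards.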
func (s *testCommitterSuite) TestAsyncCommit(c *C) {
	defer config.RestoreFunc()()
	config.UFIDelateGlobal(func(conf *config.Config) {
		conf.EinsteinDBClient.EnableAsyncCommit = true
	})

	ctx := context.Background()
	pk := ekv.Key("tpk")
	pkVal := []byte("pkVal")
	k1 := ekv.Key("tk1")
	k1Val := []byte("k1Val")
	txn1 := s.begin(c)
	err := txn1.Set(pk, pkVal)
	c.Assert(err, IsNil)
	err = txn1.Set(k1, k1Val)
	c.Assert(err, IsNil)

	committer, err := newTwoPhaseCommitterWithInit(txn1, 0)
	c.Assert(err, IsNil)
	committer.connID = 1
	committer.minCommitTS = txn1.startTS + 10
	err = committer.execute(ctx)
	c.Assert(err, IsNil)

	// TODO: remove the sleep when the recovery logic is done.
	time.Sleep(1 * time.Second)
	s.checkValues(c, map[string]string{
		string(pk): string(pkVal),
		string(k1): string(k1Val),
	})
}

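// mockClient wraps the real client and records whether it has seen
// well-formed async-commit prewrite requests for the primary batch and for
// secondary batches.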
type mockClient struct {
	inner            Client
	seenPrimaryReq   uint32
	seenSecondaryReq uint32
}

func (m *mockClient) SendRequest(ctx context.Context, addr string, req *einsteindbrpc.Request, timeout time.Duration) (*einsteindbrpc.Response, error) {
	// If we find a prewrite request, check if it satisfies our constraints.
	if pr, ok := req.Req.(*ekvrpcpb.PrewriteRequest); ok {
		if pr.UseAsyncCommit {
			if isPrimary(pr) {
				// The primary key should not be included, nor should there be any duplicates. All keys should be present.
				if !includesPrimary(pr) && allKeysNoDups(pr) {
					atomic.StoreUint32(&m.seenPrimaryReq, 1)
				}
			} else {
				// Secondaries should only be sent with the primary key
				if len(pr.Secondaries) == 0 {
					atomic.StoreUint32(&m.seenSecondaryReq, 1)
				}
			}
		}
	}
	return m.inner.SendRequest(ctx, addr, req, timeout)
}

func (m *mockClient) Close() error {
	return m.inner.Close()
}

func isPrimary(req *ekvrpcpb.PrewriteRequest) bool {
	for _, m := range req.Mutations {
		if bytes.Equal(req.PrimaryLock, m.Key) {
			return true
		}
	}

	return false
}

func includesPrimary(req *ekvrpcpb.PrewriteRequest) bool {
	for _, k := range req.Secondaries {
		if bytes.Equal(req.PrimaryLock, k) {
			return true
		}
	}

	return false
}

func allKeysNoDups(req *ekvrpcpb.PrewriteRequest) bool {
	check := make(map[string]bool)

	// Create the check map and check for duplicates.
	for _, k := range req.Secondaries {
		s := string(k)
		if check[s] {
			return false
		}
		check[s] = true
	}

	// Check every key is present.
	for i := byte(50); i < 120; i++ {
		k := []byte{i}
		if !bytes.Equal(req.PrimaryLock, k) && !check[string(k)] {
			return false
		}
	}
	return true
}