github.com/cockroachdb/cockroach@v20.2.0-alpha.1+incompatible/pkg/kv/kvclient/kvcoord/txn_test.go

     1  // Copyright 2014 The Cockroach Authors.
     2  //
     3  // Use of this software is governed by the Business Source License
     4  // included in the file licenses/BSL.txt.
     5  //
     6  // As of the Change Date specified in that file, in accordance with
     7  // the Business Source License, use of this software will be governed
     8  // by the Apache License, Version 2.0, included in the file
     9  // licenses/APL.txt.
    10  
    11  package kvcoord
    12  
    13  import (
    14  	"bytes"
    15  	"context"
    16  	"fmt"
    17  	"testing"
    18  	"time"
    19  
    20  	"github.com/cockroachdb/cockroach/pkg/keys"
    21  	"github.com/cockroachdb/cockroach/pkg/kv"
    22  	"github.com/cockroachdb/cockroach/pkg/kv/kvserver"
    23  	"github.com/cockroachdb/cockroach/pkg/kv/kvserver/tscache"
    24  	"github.com/cockroachdb/cockroach/pkg/roachpb"
    25  	"github.com/cockroachdb/cockroach/pkg/storage/enginepb"
    26  	"github.com/cockroachdb/cockroach/pkg/testutils"
    27  	"github.com/cockroachdb/cockroach/pkg/testutils/localtestcluster"
    28  	"github.com/cockroachdb/cockroach/pkg/util/hlc"
    29  	"github.com/cockroachdb/cockroach/pkg/util/leaktest"
    30  	"github.com/cockroachdb/errors"
    31  	"github.com/stretchr/testify/require"
    32  )
    33  
    34  // TestTxnDBBasics verifies that a simple transaction can be run and
    35  // either committed or aborted. On commit, mutations are visible; on
    36  // abort, mutations are never visible. During the txn, verify that
    37  // uncommitted writes cannot be read outside of the txn but can be
    38  // read from inside the txn.
    39  func TestTxnDBBasics(t *testing.T) {
    40  	defer leaktest.AfterTest(t)()
    41  	s := createTestDB(t)
    42  	defer s.Stop()
    43  	value := []byte("value")
    44  
    45  	for _, commit := range []bool{true, false} {
    46  		key := []byte(fmt.Sprintf("key-%t", commit))
    47  
    48  		err := s.DB.Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error {
    49  			// Put transactional value.
    50  			if err := txn.Put(ctx, key, value); err != nil {
    51  				return err
    52  			}
    53  
    54  			// Attempt to read in another txn.
    55  			conflictTxn := kv.NewTxn(ctx, s.DB, 0 /* gatewayNodeID */)
    56  			conflictTxn.TestingSetPriority(enginepb.MaxTxnPriority)
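         			// The max-priority reader pushes the intent's timestamp above its read
         			// timestamp rather than blocking on it, so the Get must return without
         			// seeing the uncommitted value.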
    57  			if gr, err := conflictTxn.Get(ctx, key); err != nil {
    58  				return err
    59  			} else if gr.Exists() {
    60  				return errors.Errorf("expected nil value; got %v", gr.Value)
    61  			}
    62  
    63  			// Read within the transaction.
    64  			if gr, err := txn.Get(ctx, key); err != nil {
    65  				return err
    66  			} else if !gr.Exists() || !bytes.Equal(gr.ValueBytes(), value) {
    67  				return errors.Errorf("expected value %q; got %q", value, gr.Value)
    68  			}
    69  
    70  			if !commit {
    71  				return errors.Errorf("purposefully failing transaction")
    72  			}
    73  			return nil
    74  		})
    75  
    76  		if commit != (err == nil) {
    77  			t.Errorf("expected success? %t; got %s", commit, err)
    78  		} else if !commit && !testutils.IsError(err, "purposefully failing transaction") {
    79  			t.Errorf("unexpected failure with !commit: %v", err)
    80  		}
    81  
    82  		// Verify the value is now visible on commit == true, and not visible otherwise.
    83  		gr, err := s.DB.Get(context.Background(), key)
    84  		if commit {
    85  			if err != nil || !gr.Exists() || !bytes.Equal(gr.ValueBytes(), value) {
    86  				t.Errorf("expected success reading value: %+v, %s", gr.ValueBytes(), err)
    87  			}
    88  		} else {
    89  			if err != nil || gr.Exists() {
    90  				t.Errorf("expected success and nil value: %s, %s", gr, err)
    91  			}
    92  		}
    93  	}
    94  }
    95  
     96  // BenchmarkSingleRoundtripWithLatency runs a number of transactions back to
     97  // back, each writing to the same key and committing in a single round trip.
     98  // Latency is simulated by pausing before each RPC is sent.
    99  func BenchmarkSingleRoundtripWithLatency(b *testing.B) {
   100  	for _, latency := range []time.Duration{0, 10 * time.Millisecond} {
   101  		b.Run(fmt.Sprintf("latency=%s", latency), func(b *testing.B) {
   102  			var s localtestcluster.LocalTestCluster
   103  			s.Latency = latency
   104  			s.Start(b, testutils.NewNodeTestBaseContext(), InitFactoryForLocalTestCluster)
   105  			defer s.Stop()
   106  			defer b.StopTimer()
   107  			key := roachpb.Key("key")
   108  			b.ResetTimer()
   109  			for i := 0; i < b.N; i++ {
   110  				if err := s.DB.Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error {
   111  					b := txn.NewBatch()
   112  					b.Put(key, fmt.Sprintf("value-%d", i))
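         					// CommitInBatch attaches the EndTxn to the batch, so the Put and the
         					// commit are sent together in a single round trip.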
   113  					return txn.CommitInBatch(ctx, b)
   114  				}); err != nil {
   115  					b.Fatal(err)
   116  				}
   117  			}
   118  		})
   119  	}
   120  }
   121  
   122  // TestLostUpdate verifies that transactions are not susceptible to the
   123  // lost update anomaly.
   124  //
   125  // The transaction history looks as follows ("2" refers to the
   126  // independent goroutine's actions)
   127  //
   128  //   R1(A) W2(A,"hi") W1(A,"oops!") C1 [serializable restart] R1(A) W1(A,"correct") C1
   129  func TestLostUpdate(t *testing.T) {
   130  	defer leaktest.AfterTest(t)()
   131  	s := createTestDB(t)
   132  	defer s.Stop()
   133  	var key = roachpb.Key("a")
   134  
   135  	done := make(chan error)
   136  	start := make(chan struct{})
   137  	go func() {
   138  		<-start
   139  		done <- s.DB.Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error {
   140  			return txn.Put(ctx, key, "hi")
   141  		})
   142  	}()
   143  
   144  	firstAttempt := true
   145  	if err := s.DB.Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error {
   146  		// Issue a read to get initial value.
   147  		gr, err := txn.Get(ctx, key)
   148  		if err != nil {
   149  			t.Fatal(err)
   150  		}
   151  		if txn.Epoch() == 0 {
   152  			close(start) // let someone write into our future
   153  			// When they're done, write based on what we read.
   154  			if err := <-done; err != nil {
   155  				t.Fatal(err)
   156  			}
   157  		} else if txn.Epoch() > 1 {
   158  			t.Fatal("should experience just one restart")
   159  		}
   160  
   161  		var newVal string
   162  		if gr.Exists() && bytes.Equal(gr.ValueBytes(), []byte("hi")) {
   163  			newVal = "correct"
   164  		} else {
   165  			newVal = "oops!"
   166  		}
   167  		b := txn.NewBatch()
   168  		b.Put(key, newVal)
   169  		err = txn.Run(ctx, b)
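         		// On the first attempt the write runs into the concurrent writer's newer
         		// value and the earlier read of the same key cannot be refreshed past it,
         		// so Run is expected to fail with a retriable RETRY_WRITE_TOO_OLD error;
         		// the second epoch then reads "hi" and writes "correct".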
   170  		if firstAttempt {
   171  			require.Regexp(t, "RETRY_WRITE_TOO_OLD", err)
   172  			firstAttempt = false
   173  			return err
   174  		}
   175  		require.NoError(t, err)
   176  		return nil
   177  	}); err != nil {
   178  		t.Fatal(err)
   179  	}
   180  
   181  	// Verify final value.
   182  	gr, err := s.DB.Get(context.Background(), key)
   183  	if err != nil {
   184  		t.Fatal(err)
   185  	}
   186  	if !gr.Exists() || !bytes.Equal(gr.ValueBytes(), []byte("correct")) {
   187  		t.Fatalf("expected \"correct\", got %q", gr.ValueBytes())
   188  	}
   189  }
   190  
   191  // TestPriorityRatchetOnAbortOrPush verifies that the priority of
   192  // a transaction is ratcheted by successive aborts or pushes. In
   193  // particular, we want to ensure ratcheted priorities when the txn
   194  // discovers it's been aborted or pushed through a poisoned sequence
   195  // cache. This happens when a concurrent writer aborts an intent or a
   196  // concurrent reader pushes an intent.
   197  func TestPriorityRatchetOnAbortOrPush(t *testing.T) {
   198  	defer leaktest.AfterTest(t)()
   199  	s := createTestDB(t)
   200  	defer s.Stop()
   201  
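         	// A high-priority reader only needs to push the conflicting txn's
         	// timestamp, while a high-priority writer aborts it outright so that it
         	// can lay down its own intent. In both cases the victim's priority is
         	// ratcheted on retry.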
   202  	pushByReading := func(key roachpb.Key) {
   203  		if err := s.DB.Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error {
   204  			if err := txn.SetUserPriority(roachpb.MaxUserPriority); err != nil {
   205  				t.Fatal(err)
   206  			}
   207  			_, err := txn.Get(ctx, key)
   208  			return err
   209  		}); err != nil {
   210  			t.Fatal(err)
   211  		}
   212  	}
   213  	abortByWriting := func(key roachpb.Key) {
   214  		if err := s.DB.Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error {
   215  			if err := txn.SetUserPriority(roachpb.MaxUserPriority); err != nil {
   216  				t.Fatal(err)
   217  			}
   218  			return txn.Put(ctx, key, "foo")
   219  		}); err != nil {
   220  			t.Fatal(err)
   221  		}
   222  	}
   223  
   224  	// Try both read and write.
   225  	for _, read := range []bool{true, false} {
   226  		var iteration int
   227  		if err := s.DB.Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error {
   228  			defer func() { iteration++ }()
   229  			key := roachpb.Key(fmt.Sprintf("read=%t", read))
   230  
   231  			// Write to lay down an intent (this will send the begin
   232  			// transaction which gets the updated priority).
   233  			if err := txn.Put(ctx, key, "bar"); err != nil {
   234  				return err
   235  			}
   236  
   237  			if iteration == 1 {
   238  				// Verify our priority has ratcheted to one less than the pusher's priority
   239  				expPri := enginepb.MaxTxnPriority - 1
   240  				if pri := txn.TestingCloneTxn().Priority; pri != expPri {
   241  					t.Fatalf("%s: expected priority on retry to ratchet to %d; got %d", key, expPri, pri)
   242  				}
   243  				return nil
   244  			}
   245  
   246  			// Now simulate a concurrent reader or writer. Our txn will
   247  			// either be pushed or aborted. Then issue a read and verify
   248  			// that if we've been pushed, no error is returned and if we
   249  			// have been aborted, we get an aborted error.
   250  			var err error
   251  			if read {
   252  				pushByReading(key)
   253  				_, err = txn.Get(ctx, key)
   254  				if err != nil {
   255  					t.Fatalf("%s: expected no error; got %s", key, err)
   256  				}
   257  			} else {
   258  				abortByWriting(key)
   259  				_, err = txn.Get(ctx, key)
   260  				assertTransactionAbortedError(t, err)
   261  			}
   262  
   263  			return err
   264  		}); err != nil {
   265  			t.Fatal(err)
   266  		}
   267  	}
   268  }
   269  
   270  // TestTxnTimestampRegression verifies that if a transaction's timestamp is
   271  // pushed forward by a concurrent read, it may still commit. A bug in the EndTxn
   272  // implementation used to compare the transaction's current timestamp instead of
    273  // its original timestamp.
   274  func TestTxnTimestampRegression(t *testing.T) {
   275  	defer leaktest.AfterTest(t)()
   276  	s := createTestDB(t)
   277  	defer s.Stop()
   278  
   279  	keyA := "a"
   280  	keyB := "b"
   281  	err := s.DB.Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error {
   282  		// Put transactional value.
   283  		if err := txn.Put(ctx, keyA, "value1"); err != nil {
   284  			return err
   285  		}
   286  
    287  		// Attempt to read in another txn (this will push the transaction's timestamp).
   288  		conflictTxn := kv.NewTxn(ctx, s.DB, 0 /* gatewayNodeID */)
   289  		conflictTxn.TestingSetPriority(enginepb.MaxTxnPriority)
   290  		if _, err := conflictTxn.Get(context.Background(), keyA); err != nil {
   291  			return err
   292  		}
   293  
    294  		// Now read keyB outside of the txn to warm up the timestamp cache with a higher timestamp.
   295  		if _, err := s.DB.Get(context.Background(), keyB); err != nil {
   296  			return err
   297  		}
   298  
    299  		// Now write to keyB, which will get a higher timestamp than the one keyA was written at.
   300  		return txn.Put(ctx, keyB, "value2")
   301  	})
   302  	if err != nil {
   303  		t.Fatal(err)
   304  	}
   305  }
   306  
   307  // TestTxnLongDelayBetweenWritesWithConcurrentRead simulates a
   308  // situation where the delay between two writes in a txn is longer
   309  // than 10 seconds.
    310  // See issue #676 for full details about the original bug.
   311  func TestTxnLongDelayBetweenWritesWithConcurrentRead(t *testing.T) {
   312  	defer leaktest.AfterTest(t)()
   313  	s := createTestDB(t)
   314  	defer s.Stop()
   315  
   316  	keyA := roachpb.Key("a")
   317  	keyB := roachpb.Key("b")
   318  	ch := make(chan struct{})
   319  	errChan := make(chan error)
   320  	go func() {
   321  		errChan <- s.DB.Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error {
   322  			// Put transactional value.
   323  			if err := txn.Put(ctx, keyA, "value1"); err != nil {
   324  				return err
   325  			}
    326  			// Notify txnB to do the first get(b).
   327  			ch <- struct{}{}
    328  			// Wait for txnB to notify us to put(b).
   329  			<-ch
   330  			// Write now to keyB.
   331  			return txn.Put(ctx, keyB, "value2")
   332  		})
   333  	}()
   334  
    335  	// Wait until txnA finishes put(a).
   336  	<-ch
   337  	// Delay for longer than the cache window.
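         	// With the clock advanced past the retention window, txnA's delayed
         	// put(b) is issued long after its first write; the two reads of keyB in
         	// the transaction below must nevertheless return the same (missing) value.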
   338  	s.Manual.Increment((tscache.MinRetentionWindow + time.Second).Nanoseconds())
   339  	if err := s.DB.Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error {
    340  		// Attempt the first get of keyB.
   341  		gr1, err := txn.Get(ctx, keyB)
   342  		if err != nil {
   343  			return err
   344  		}
    345  		// Notify txnA to put(b).
   346  		ch <- struct{}{}
    347  		// Wait for txnA to finish the commit.
   348  		if err := <-errChan; err != nil {
   349  			t.Fatal(err)
   350  		}
   351  		// get(b) again.
   352  		gr2, err := txn.Get(ctx, keyB)
   353  		if err != nil {
   354  			return err
   355  		}
   356  
   357  		if gr1.Exists() || gr2.Exists() {
    358  			t.Fatalf("repeated read of the same key in the same txn returned different values: gr1: %q, gr2: %q", gr1.Value, gr2.Value)
   359  		}
   360  		return nil
   361  	}); err != nil {
   362  		t.Fatal(err)
   363  	}
   364  }
   365  
   366  // TestTxnRepeatGetWithRangeSplit simulates two writes in a single
   367  // transaction, with a range split occurring between. The second write
   368  // is sent to the new range. The test verifies that another transaction
   369  // reading before and after the split will read the same values.
    370  // See issue #676 for full details about the original bug.
   371  func TestTxnRepeatGetWithRangeSplit(t *testing.T) {
   372  	defer leaktest.AfterTest(t)()
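         	// The scanner and the split/merge queues are disabled so that the only
         	// split in this test is the manual AdminSplit issued below.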
   373  	s := createTestDBWithContextAndKnobs(t, kv.DefaultDBContext(), &kvserver.StoreTestingKnobs{
   374  		DisableScanner:    true,
   375  		DisableSplitQueue: true,
   376  		DisableMergeQueue: true,
   377  	})
   378  	defer s.Stop()
   379  
   380  	keyA := roachpb.Key("a")
   381  	keyC := roachpb.Key("c")
   382  	splitKey := roachpb.Key("b")
   383  	ch := make(chan struct{})
   384  	errChan := make(chan error)
   385  	go func() {
   386  		errChan <- s.DB.Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error {
   387  			// Put transactional value.
   388  			if err := txn.Put(ctx, keyA, "value1"); err != nil {
   389  				return err
   390  			}
    391  			// Notify txnB to do the first get(c).
   392  			ch <- struct{}{}
    393  			// Wait for txnB to notify us to put(c).
   394  			<-ch
    395  			// Write now to keyC, which will keep the txn's original timestamp.
   396  			return txn.Put(ctx, keyC, "value2")
   397  		})
   398  	}()
   399  
    400  	// Wait until txnA finishes put(a).
   401  	<-ch
   402  
   403  	if err := s.DB.Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error {
    404  		// First get of keyC; the value will be nil.
   405  		gr1, err := txn.Get(ctx, keyC)
   406  		if err != nil {
   407  			return err
   408  		}
   409  		s.Manual.Increment(time.Second.Nanoseconds())
    410  		// Split the range at splitKey ("b").
   411  		if err := s.DB.AdminSplit(context.Background(), splitKey, splitKey, hlc.MaxTimestamp /* expirationTime */); err != nil {
   412  			t.Fatal(err)
   413  		}
    414  		// Wait until the split completes.
    415  		// Check that the split happened once within the allotted time.
   416  		testutils.SucceedsSoon(t, func() error {
   417  			// Scan the meta records.
   418  			rows, serr := s.DB.Scan(context.Background(), keys.Meta2Prefix, keys.MetaMax, 0)
   419  			if serr != nil {
   420  				t.Fatalf("failed to scan meta2 keys: %s", serr)
   421  			}
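         			// A completed split leaves a meta2 descriptor for each post-split
         			// range, so two or more rows indicate that the split has finished.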
   422  			if len(rows) >= 2 {
   423  				return nil
   424  			}
   425  			return errors.Errorf("failed to split")
   426  		})
    427  		// Notify txnA to put(c).
   428  		ch <- struct{}{}
    429  		// Wait for txnA to finish the commit.
   430  		if err := <-errChan; err != nil {
   431  			t.Fatal(err)
   432  		}
   433  		// Get(c) again.
   434  		gr2, err := txn.Get(ctx, keyC)
   435  		if err != nil {
   436  			return err
   437  		}
   438  
   439  		if !gr1.Exists() && gr2.Exists() {
    440  			t.Fatalf("repeated read of the same key in the same txn returned different values: gr1 nil, gr2 %v", gr2.Value)
   441  		}
   442  		return nil
   443  	}); err != nil {
   444  		t.Fatal(err)
   445  	}
   446  }
   447  
   448  // TestTxnRestartedSerializableTimestampRegression verifies that there is
   449  // no timestamp regression error in the event that a pushed txn record disagrees
   450  // with the original timestamp of a restarted transaction.
   451  func TestTxnRestartedSerializableTimestampRegression(t *testing.T) {
   452  	defer leaktest.AfterTest(t)()
   453  	s := createTestDB(t)
   454  	defer s.Stop()
   455  
   456  	keyA := "a"
   457  	keyB := "b"
   458  	ch := make(chan struct{})
   459  	errChan := make(chan error)
   460  	var count int
   461  	go func() {
   462  		errChan <- s.DB.Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error {
   463  			count++
   464  			// Use a low priority for the transaction so that it can be pushed.
   465  			if err := txn.SetUserPriority(roachpb.MinUserPriority); err != nil {
   466  				t.Fatal(err)
   467  			}
   468  
   469  			// Put transactional value.
   470  			if err := txn.Put(ctx, keyA, "value1"); err != nil {
   471  				return err
   472  			}
   473  			if count <= 1 {
   474  				// Notify concurrent getter to push txnA on get(a).
   475  				ch <- struct{}{}
    476  				// Wait for the concurrent reader to notify us to commit.
   477  				<-ch
   478  			}
    479  			// Do a write to keyB, which will forward the txn's timestamp.
   480  			return txn.Put(ctx, keyB, "value2")
   481  		})
   482  	}()
   483  
   484  	// Wait until txnA finishes put(a).
   485  	<-ch
   486  	// Attempt to get keyA, which will push txnA.
   487  	if _, err := s.DB.Get(context.Background(), keyA); err != nil {
   488  		t.Fatal(err)
   489  	}
    490  	// Do a read at keyB to cause txnA to forward its timestamp.
   491  	if _, err := s.DB.Get(context.Background(), keyB); err != nil {
   492  		t.Fatal(err)
   493  	}
   494  	// Notify txnA to commit.
   495  	ch <- struct{}{}
   496  
   497  	// Wait for txnA to finish.
   498  	if err := <-errChan; err != nil {
   499  		t.Fatal(err)
   500  	}
   501  	// We expect no restarts (so a count of one). The transaction continues
   502  	// despite the push and timestamp forwarding in order to lay down all
    503  	// intents in the first pass. On the first EndTxn, the difference in
    504  	// timestamps would normally require refreshing the txn's read spans, but
    505  	// only writes occurred during the transaction, so the commit succeeds.
   506  	const expCount = 1
   507  	if count != expCount {
   508  		t.Fatalf("expected %d restarts, but got %d", expCount, count)
   509  	}
   510  }
   511  
    512  // TestTxnResolveIntentsFromMultipleEpochs verifies that intents
   513  // from earlier epochs are cleaned up on transaction commit.
   514  func TestTxnResolveIntentsFromMultipleEpochs(t *testing.T) {
   515  	defer leaktest.AfterTest(t)()
   516  	s := createTestDB(t)
   517  	defer s.Stop()
   518  	ctx := context.Background()
   519  
   520  	writeSkewKey := "write-skew"
   521  	keys := []string{"a", "b", "c"}
   522  	ch := make(chan struct{})
   523  	errChan := make(chan error, 1)
   524  	// Launch goroutine to write the three keys on three successive epochs.
   525  	go func() {
   526  		var count int
   527  		err := s.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
   528  			// Read the write skew key, which will be written by another goroutine
   529  			// to ensure transaction restarts.
   530  			if _, err := txn.Get(ctx, writeSkewKey); err != nil {
   531  				return err
   532  			}
   533  			// Signal that the transaction has (re)started.
   534  			ch <- struct{}{}
   535  			// Wait for concurrent writer to write key.
   536  			<-ch
   537  			// Now write our version over the top (will get a pushed timestamp).
   538  			if err := txn.Put(ctx, keys[count], "txn"); err != nil {
   539  				return err
   540  			}
   541  			count++
   542  			return nil
   543  		})
   544  		if err != nil {
   545  			errChan <- err
   546  		} else if count < len(keys) {
   547  			errChan <- fmt.Errorf(
   548  				"expected to have to retry %d times and only retried %d times", len(keys), count-1)
   549  		} else {
   550  			errChan <- nil
   551  		}
   552  	}()
   553  
   554  	step := func(key string, causeWriteSkew bool) {
   555  		// Wait for transaction to start.
   556  		<-ch
   557  		if causeWriteSkew {
   558  			// Write to the write skew key to ensure a restart.
   559  			if err := s.DB.Put(ctx, writeSkewKey, "skew-"+key); err != nil {
   560  				t.Fatal(err)
   561  			}
   562  		}
   563  		// Read key to push txn's timestamp forward on its write.
   564  		if _, err := s.DB.Get(ctx, key); err != nil {
   565  			t.Fatal(err)
   566  		}
   567  		// Signal the transaction to continue.
   568  		ch <- struct{}{}
   569  	}
   570  
   571  	// Step 1 causes a restart.
   572  	step(keys[0], true)
   573  	// Step 2 causes a restart.
   574  	step(keys[1], true)
   575  	// Step 3 does not result in a restart.
   576  	step(keys[2], false)
   577  
   578  	// Wait for txn to finish.
   579  	if err := <-errChan; err != nil {
   580  		t.Fatal(err)
   581  	}
   582  
   583  	// Read values for three keys. The first two should be empty, the last should be "txn".
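         	// Intents written to the first two keys belong to earlier, restarted
         	// epochs; committing the transaction must have resolved and removed them,
         	// leaving only the final epoch's write visible.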
   584  	for i, k := range keys {
   585  		v, err := s.DB.Get(ctx, k)
   586  		if err != nil {
   587  			t.Fatal(err)
   588  		}
   589  		str := string(v.ValueBytes())
   590  		if i < len(keys)-1 {
   591  			if str != "" {
   592  				t.Errorf("key %s expected \"\"; got %s", k, str)
   593  			}
   594  		} else {
   595  			if str != "txn" {
   596  				t.Errorf("key %s expected \"txn\"; got %s", k, str)
   597  			}
   598  		}
   599  	}
   600  }
   601  
   602  // Test that txn.CommitTimestamp() reflects refreshes.
   603  func TestTxnCommitTimestampAdvancedByRefresh(t *testing.T) {
   604  	defer leaktest.AfterTest(t)()
   605  	ctx := context.Background()
   606  
   607  	// We're going to inject an uncertainty error, expect the refresh to succeed,
   608  	// and then check that the txn.CommitTimestamp() value reflects the refresh.
   609  	injected := false
   610  	var refreshTS hlc.Timestamp
   611  	errKey := roachpb.Key("inject_err")
   612  	s := createTestDBWithContextAndKnobs(t, kv.DefaultDBContext(), &kvserver.StoreTestingKnobs{
   613  		TestingRequestFilter: func(_ context.Context, ba roachpb.BatchRequest) *roachpb.Error {
   614  			if g, ok := ba.GetArg(roachpb.Get); ok && g.(*roachpb.GetRequest).Key.Equal(errKey) {
   615  				if injected {
   616  					return nil
   617  				}
   618  				injected = true
   619  				txn := ba.Txn.Clone()
   620  				refreshTS = txn.WriteTimestamp.Add(0, 1)
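         				// The injected uncertainty error names refreshTS as the conflicting
         				// value's timestamp, so the client's refresh should land just above it,
         				// which the transaction below asserts via CommitTimestamp().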
   621  				pErr := roachpb.NewReadWithinUncertaintyIntervalError(
   622  					txn.ReadTimestamp,
   623  					refreshTS,
   624  					txn)
   625  				return roachpb.NewErrorWithTxn(pErr, txn)
   626  			}
   627  			return nil
   628  		},
   629  	})
   630  	defer s.Stop()
   631  
   632  	err := s.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
   633  		_, err := txn.Get(ctx, errKey)
   634  		if err != nil {
   635  			return err
   636  		}
   637  		if !injected {
   638  			return errors.Errorf("didn't inject err")
   639  		}
   640  		commitTS := txn.CommitTimestamp()
   641  		// We expect to have refreshed just after the timestamp injected by the error.
   642  		expTS := refreshTS.Add(0, 1)
   643  		if !commitTS.Equal(expTS) {
    644  			return errors.Errorf("expected commit timestamp: %s, got: %s", expTS, commitTS)
   645  		}
   646  		return nil
   647  	})
   648  	require.NoError(t, err)
   649  }
   650  
   651  // Test that in some write too old situations (i.e. when the server returns the
   652  // WriteTooOld flag set and then the client fails to refresh), intents are
   653  // properly left behind.
   654  func TestTxnLeavesIntentBehindAfterWriteTooOldError(t *testing.T) {
   655  	defer leaktest.AfterTest(t)()
   656  	ctx := context.Background()
   657  	s := createTestDB(t)
   658  	defer s.Stop()
   659  
   660  	key := []byte("b")
   661  
   662  	txn := s.DB.NewTxn(ctx, "test txn")
   663  	// Perform a Get so that the transaction can't refresh.
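         	// The conflicting write below lands on this same key at a higher
         	// timestamp, so the read span cannot be refreshed past it.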
   664  	_, err := txn.Get(ctx, key)
   665  	require.NoError(t, err)
   666  
    667  	// Another writer writes the key at a higher timestamp.
   668  	require.NoError(t, s.DB.Put(ctx, key, "newer value"))
   669  
   670  	// Now we write and expect a WriteTooOld.
   671  	intentVal := []byte("test")
   672  	err = txn.Put(ctx, key, intentVal)
   673  	require.IsType(t, &roachpb.TransactionRetryWithProtoRefreshError{}, err)
   674  	require.Regexp(t, "WriteTooOld", err)
   675  
   676  	// Check that the intent was left behind.
   677  	b := kv.Batch{}
   678  	b.Header.ReadConsistency = roachpb.READ_UNCOMMITTED
   679  	b.Get(key)
   680  	require.NoError(t, s.DB.Run(ctx, &b))
   681  	getResp := b.RawResponse().Responses[0].GetGet()
   682  	require.NotNil(t, getResp)
   683  	intent := getResp.IntentValue
   684  	require.NotNil(t, intent)
   685  	intentBytes, err := intent.GetBytes()
   686  	require.NoError(t, err)
   687  	require.Equal(t, intentVal, intentBytes)
   688  
   689  	// Cleanup.
   690  	require.NoError(t, txn.Rollback(ctx))
   691  }
   692  
   693  // Test that a transaction can be used after a CPut returns a
   694  // ConditionFailedError. This is not generally allowed for other errors, but
   695  // ConditionFailedError is special.
   696  func TestTxnContinueAfterCputError(t *testing.T) {
   697  	defer leaktest.AfterTest(t)()
   698  	ctx := context.Background()
   699  	s := createTestDB(t)
   700  	defer s.Stop()
   701  
   702  	txn := s.DB.NewTxn(ctx, "test txn")
   703  	expVal := roachpb.MakeValueFromString("dummy")
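         	// The key has never been written, so the expected value does not match
         	// and the CPut must fail with a ConditionFailedError.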
   704  	err := txn.CPut(ctx, "a", "val", &expVal)
   705  	require.IsType(t, &roachpb.ConditionFailedError{}, err)
   706  
   707  	require.NoError(t, txn.Put(ctx, "a", "b'"))
   708  	require.NoError(t, txn.Commit(ctx))
   709  }