github.com/cockroachdb/cockroach@v20.2.0-alpha.1+incompatible/pkg/kv/kvclient/kvcoord/txn_interceptor_pipeliner_test.go (about)

     1  // Copyright 2018 The Cockroach Authors.
     2  //
     3  // Use of this software is governed by the Business Source License
     4  // included in the file licenses/BSL.txt.
     5  //
     6  // As of the Change Date specified in that file, in accordance with
     7  // the Business Source License, use of this software will be governed
     8  // by the Apache License, Version 2.0, included in the file
     9  // licenses/APL.txt.
    10  
    11  package kvcoord
    12  
    13  import (
    14  	"context"
    15  	"fmt"
    16  	"math"
    17  	"strings"
    18  	"testing"
    19  
    20  	"github.com/cockroachdb/cockroach/pkg/kv/kvserver/concurrency/lock"
    21  	"github.com/cockroachdb/cockroach/pkg/roachpb"
    22  	"github.com/cockroachdb/cockroach/pkg/settings/cluster"
    23  	"github.com/cockroachdb/cockroach/pkg/storage/enginepb"
    24  	"github.com/cockroachdb/cockroach/pkg/util/hlc"
    25  	"github.com/cockroachdb/cockroach/pkg/util/leaktest"
    26  	"github.com/stretchr/testify/require"
    27  )
    28  
// mockLockedSender implements the lockedSender interface and provides a way to
// mock out and adjust the SendLocked method. If no mock function is set, a call
// to SendLocked will return the default successful response.
type mockLockedSender struct {
	// mockFn, when non-nil, intercepts SendLocked calls. Installed via
	// MockSend or ChainMockSend and cleared via Reset.
	mockFn func(roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error)
}
    35  
    36  func (m *mockLockedSender) SendLocked(
    37  	ctx context.Context, ba roachpb.BatchRequest,
    38  ) (*roachpb.BatchResponse, *roachpb.Error) {
    39  	if m.mockFn == nil {
    40  		br := ba.CreateReply()
    41  		br.Txn = ba.Txn
    42  		return br, nil
    43  	}
    44  	return m.mockFn(ba)
    45  }
    46  
// MockSend sets the mockLockedSender mocking function. The function will be
// invoked by subsequent SendLocked calls until replaced or Reset.
func (m *mockLockedSender) MockSend(
	fn func(roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error),
) {
	m.mockFn = fn
}
    53  
// Reset resets the mockLockedSender mocking function to a no-op, restoring the
// default successful-response behavior of SendLocked.
func (m *mockLockedSender) Reset() {
	m.mockFn = nil
}
    58  
    59  // ChainMockSend sets a series of mocking functions on the mockLockedSender.
    60  // The provided mocking functions are set in the order that they are provided
    61  // and a given mocking function is set after the previous one has been called.
    62  func (m *mockLockedSender) ChainMockSend(
    63  	fns ...func(roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error),
    64  ) {
    65  	for i := range fns {
    66  		i := i
    67  		fn := fns[i]
    68  		fns[i] = func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
    69  			if i < len(fns)-1 {
    70  				m.mockFn = fns[i+1]
    71  			}
    72  			return fn(ba)
    73  		}
    74  	}
    75  	m.mockFn = fns[0]
    76  }
    77  
    78  func makeMockTxnPipeliner() (txnPipeliner, *mockLockedSender) {
    79  	mockSender := &mockLockedSender{}
    80  	return txnPipeliner{
    81  		st:      cluster.MakeTestingClusterSettings(),
    82  		wrapped: mockSender,
    83  	}, mockSender
    84  }
    85  
// makeTxnProto returns a fresh transaction proto for use in tests, anchored at
// key "key" with a wall time of 10.
func makeTxnProto() roachpb.Transaction {
	return roachpb.MakeTransaction("test", []byte("key"), 0, hlc.Timestamp{WallTime: 10}, 0)
}
    89  
// TestTxnPipeliner1PCTransaction tests that the writes performed by 1PC
// transactions are not pipelined by the txnPipeliner. It also tests that the
// interceptor attaches any locks that the batch will acquire as lock spans to
// the EndTxn request except for those locks that correspond to point writes,
// which are attached to the EndTxn request separately.
func TestTxnPipeliner1PCTransaction(t *testing.T) {
	defer leaktest.AfterTest(t)()
	ctx := context.Background()
	tp, mockSender := makeMockTxnPipeliner()

	txn := makeTxnProto()
	keyA, keyB := roachpb.Key("a"), roachpb.Key("b")
	keyC, keyD := roachpb.Key("c"), roachpb.Key("d")

	// Build a 1PC batch: a locking scan, a point write, a ranged delete, and
	// the committing EndTxn, all in one batch.
	var ba roachpb.BatchRequest
	ba.Header = roachpb.Header{Txn: &txn}
	scanArgs := roachpb.ScanRequest{
		RequestHeader: roachpb.RequestHeader{Key: keyA, EndKey: keyB},
		KeyLocking:    lock.Exclusive,
	}
	ba.Add(&scanArgs)
	putArgs := roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}}
	putArgs.Sequence = 1
	ba.Add(&putArgs)
	delRngArgs := roachpb.DeleteRangeRequest{
		RequestHeader: roachpb.RequestHeader{Key: keyC, EndKey: keyD},
	}
	delRngArgs.Sequence = 2
	ba.Add(&delRngArgs)
	ba.Add(&roachpb.EndTxnRequest{Commit: true})

	mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
		// The batch must pass through unchanged and without async consensus.
		require.Len(t, ba.Requests, 4)
		require.False(t, ba.AsyncConsensus)
		require.IsType(t, &roachpb.ScanRequest{}, ba.Requests[0].GetInner())
		require.IsType(t, &roachpb.PutRequest{}, ba.Requests[1].GetInner())
		require.IsType(t, &roachpb.DeleteRangeRequest{}, ba.Requests[2].GetInner())
		require.IsType(t, &roachpb.EndTxnRequest{}, ba.Requests[3].GetInner())

		// Ranged lock acquisitions land in LockSpans; the point write rides in
		// InFlightWrites instead.
		etReq := ba.Requests[3].GetEndTxn()
		expLocks := []roachpb.Span{
			{Key: keyA, EndKey: keyB},
			{Key: keyC, EndKey: keyD},
		}
		require.Equal(t, expLocks, etReq.LockSpans)
		expInFlight := []roachpb.SequencedWrite{
			{Key: keyA, Sequence: 1},
		}
		require.Equal(t, expInFlight, etReq.InFlightWrites)

		br := ba.CreateReply()
		br.Txn = ba.Txn
		br.Txn.Status = roachpb.COMMITTED
		return br, nil
	})

	br, pErr := tp.SendLocked(ctx, ba)
	require.Nil(t, pErr)
	require.NotNil(t, br)
	// No writes remain tracked after the transaction commits.
	require.Equal(t, 0, tp.ifWrites.len())
}
   151  
// TestTxnPipelinerTrackInFlightWrites tests that txnPipeliner tracks writes
// that were performed with async consensus. It also tests that these writes are
// proved as requests are chained onto them. Finally, it tests that EndTxn
// requests chain on to all existing requests.
func TestTxnPipelinerTrackInFlightWrites(t *testing.T) {
	defer leaktest.AfterTest(t)()
	ctx := context.Background()
	tp, mockSender := makeMockTxnPipeliner()

	txn := makeTxnProto()
	keyA := roachpb.Key("a")

	// A lone point write is eligible for pipelining (async consensus).
	var ba roachpb.BatchRequest
	ba.Header = roachpb.Header{Txn: &txn}
	putArgs := roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}}
	putArgs.Sequence = 1
	ba.Add(&putArgs)

	mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
		require.Len(t, ba.Requests, 1)
		require.True(t, ba.AsyncConsensus)
		require.IsType(t, &roachpb.PutRequest{}, ba.Requests[0].GetInner())

		br := ba.CreateReply()
		br.Txn = ba.Txn
		return br, nil
	})

	br, pErr := tp.SendLocked(ctx, ba)
	require.Nil(t, pErr)
	require.NotNil(t, br)
	// The put is now tracked as in-flight.
	require.Equal(t, 1, tp.ifWrites.len())

	w := tp.ifWrites.t.Min().(*inFlightWrite)
	require.Equal(t, putArgs.Key, w.Key)
	require.Equal(t, putArgs.Sequence, w.Sequence)

	// More writes, one that replaces the other's sequence number.
	keyB, keyC := roachpb.Key("b"), roachpb.Key("c")
	ba.Requests = nil
	cputArgs := roachpb.ConditionalPutRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}}
	cputArgs.Sequence = 2
	ba.Add(&cputArgs)
	initPutArgs := roachpb.InitPutRequest{RequestHeader: roachpb.RequestHeader{Key: keyB}}
	initPutArgs.Sequence = 3
	ba.Add(&initPutArgs)
	incArgs := roachpb.IncrementRequest{RequestHeader: roachpb.RequestHeader{Key: keyC}}
	incArgs.Sequence = 4
	ba.Add(&incArgs)
	// Write at the same key as another write in the same batch. Will only
	// result in a single in-flight write, at the larger sequence number.
	delArgs := roachpb.DeleteRequest{RequestHeader: roachpb.RequestHeader{Key: keyC}}
	delArgs.Sequence = 5
	ba.Add(&delArgs)

	mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
		// The cput on keyA chains onto the earlier in-flight put via a
		// QueryIntent prepended to the batch; the batch itself remains
		// eligible for async consensus.
		require.Len(t, ba.Requests, 5)
		require.True(t, ba.AsyncConsensus)
		require.IsType(t, &roachpb.QueryIntentRequest{}, ba.Requests[0].GetInner())
		require.IsType(t, &roachpb.ConditionalPutRequest{}, ba.Requests[1].GetInner())
		require.IsType(t, &roachpb.InitPutRequest{}, ba.Requests[2].GetInner())
		require.IsType(t, &roachpb.IncrementRequest{}, ba.Requests[3].GetInner())
		require.IsType(t, &roachpb.DeleteRequest{}, ba.Requests[4].GetInner())

		// The QueryIntent targets the original put (sequence 1) and must error
		// if the intent is missing.
		qiReq := ba.Requests[0].GetQueryIntent()
		require.Equal(t, keyA, qiReq.Key)
		require.Equal(t, txn.ID, qiReq.Txn.ID)
		require.Equal(t, txn.WriteTimestamp, qiReq.Txn.WriteTimestamp)
		require.Equal(t, enginepb.TxnSeq(1), qiReq.Txn.Sequence)
		require.True(t, qiReq.ErrorIfMissing)

		// No in-flight writes have been proved yet.
		require.Equal(t, 1, tp.ifWrites.len())

		br = ba.CreateReply()
		br.Txn = ba.Txn
		br.Responses[0].GetQueryIntent().FoundIntent = true
		return br, nil
	})

	br, pErr = tp.SendLocked(ctx, ba)
	require.NotNil(t, br)
	require.Len(t, br.Responses, 4) // QueryIntent response stripped
	require.IsType(t, &roachpb.ConditionalPutResponse{}, br.Responses[0].GetInner())
	require.IsType(t, &roachpb.InitPutResponse{}, br.Responses[1].GetInner())
	require.IsType(t, &roachpb.IncrementResponse{}, br.Responses[2].GetInner())
	require.IsType(t, &roachpb.DeleteResponse{}, br.Responses[3].GetInner())
	require.Nil(t, pErr)
	// keyA's write is re-tracked at seq 2; keyB is new; keyC is tracked once at
	// the larger sequence number (5) per the comment above.
	require.Equal(t, 3, tp.ifWrites.len())

	wMin := tp.ifWrites.t.Min().(*inFlightWrite)
	require.Equal(t, cputArgs.Key, wMin.Key)
	require.Equal(t, cputArgs.Sequence, wMin.Sequence)
	wMax := tp.ifWrites.t.Max().(*inFlightWrite)
	require.Equal(t, delArgs.Key, wMax.Key)
	require.Equal(t, delArgs.Sequence, wMax.Sequence)

	// Send a final write, along with an EndTxn request. Should attempt to prove
	// all in-flight writes. Should NOT use async consensus.
	keyD := roachpb.Key("d")
	ba.Requests = nil
	putArgs2 := roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyD}}
	putArgs2.Sequence = 6
	ba.Add(&putArgs2)
	etArgs := roachpb.EndTxnRequest{Commit: true}
	etArgs.Sequence = 7
	ba.Add(&etArgs)

	mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
		// One QueryIntent per still-in-flight write, inserted before the EndTxn.
		require.Len(t, ba.Requests, 5)
		require.False(t, ba.AsyncConsensus)
		require.IsType(t, &roachpb.PutRequest{}, ba.Requests[0].GetInner())
		require.IsType(t, &roachpb.QueryIntentRequest{}, ba.Requests[1].GetInner())
		require.IsType(t, &roachpb.QueryIntentRequest{}, ba.Requests[2].GetInner())
		require.IsType(t, &roachpb.QueryIntentRequest{}, ba.Requests[3].GetInner())
		require.IsType(t, &roachpb.EndTxnRequest{}, ba.Requests[4].GetInner())

		qiReq1 := ba.Requests[1].GetQueryIntent()
		qiReq2 := ba.Requests[2].GetQueryIntent()
		qiReq3 := ba.Requests[3].GetQueryIntent()
		require.Equal(t, keyA, qiReq1.Key)
		require.Equal(t, keyB, qiReq2.Key)
		require.Equal(t, keyC, qiReq3.Key)
		require.Equal(t, enginepb.TxnSeq(2), qiReq1.Txn.Sequence)
		require.Equal(t, enginepb.TxnSeq(3), qiReq2.Txn.Sequence)
		require.Equal(t, enginepb.TxnSeq(5), qiReq3.Txn.Sequence)

		// The previously proven write on keyA appears as a lock span; the
		// three pending writes plus the new put ride in InFlightWrites.
		etReq := ba.Requests[4].GetEndTxn()
		require.Equal(t, []roachpb.Span{{Key: keyA}}, etReq.LockSpans)
		expInFlight := []roachpb.SequencedWrite{
			{Key: keyA, Sequence: 2},
			{Key: keyB, Sequence: 3},
			{Key: keyC, Sequence: 5},
			{Key: keyD, Sequence: 6},
		}
		require.Equal(t, expInFlight, etReq.InFlightWrites)

		br = ba.CreateReply()
		br.Txn = ba.Txn
		br.Txn.Status = roachpb.COMMITTED
		br.Responses[1].GetQueryIntent().FoundIntent = true
		br.Responses[2].GetQueryIntent().FoundIntent = true
		br.Responses[3].GetQueryIntent().FoundIntent = true
		return br, nil
	})

	br, pErr = tp.SendLocked(ctx, ba)
	require.NotNil(t, br)
	require.Len(t, br.Responses, 2) // QueryIntent response stripped
	require.Nil(t, pErr)
	require.Equal(t, 0, tp.ifWrites.len())
}
   304  
// TestTxnPipelinerReads tests that txnPipeliner will never instruct batches
// with reads in them to use async consensus. It also tests that these reading
// batches will still chain on to in-flight writes, if necessary.
func TestTxnPipelinerReads(t *testing.T) {
	defer leaktest.AfterTest(t)()
	ctx := context.Background()
	tp, mockSender := makeMockTxnPipeliner()

	txn := makeTxnProto()
	keyA, keyC := roachpb.Key("a"), roachpb.Key("c")

	// Read-only.
	var ba roachpb.BatchRequest
	ba.Header = roachpb.Header{Txn: &txn}
	ba.Add(&roachpb.GetRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}})

	mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
		require.Len(t, ba.Requests, 1)
		require.False(t, ba.AsyncConsensus)
		require.IsType(t, &roachpb.GetRequest{}, ba.Requests[0].GetInner())

		br := ba.CreateReply()
		br.Txn = ba.Txn
		return br, nil
	})

	br, pErr := tp.SendLocked(ctx, ba)
	require.Nil(t, pErr)
	require.NotNil(t, br)

	// Read before write: the read's presence disables async consensus for the
	// whole batch.
	ba.Requests = nil
	ba.Add(&roachpb.GetRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}})
	ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyC}})

	mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
		require.Len(t, ba.Requests, 2)
		require.False(t, ba.AsyncConsensus)
		require.IsType(t, &roachpb.GetRequest{}, ba.Requests[0].GetInner())
		require.IsType(t, &roachpb.PutRequest{}, ba.Requests[1].GetInner())

		br = ba.CreateReply()
		br.Txn = ba.Txn
		return br, nil
	})

	br, pErr = tp.SendLocked(ctx, ba)
	require.Nil(t, pErr)
	require.NotNil(t, br)

	// Read after write: same restriction regardless of request order.
	ba.Requests = nil
	ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyC}})
	ba.Add(&roachpb.GetRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}})

	mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
		require.Len(t, ba.Requests, 2)
		require.False(t, ba.AsyncConsensus)
		require.IsType(t, &roachpb.PutRequest{}, ba.Requests[0].GetInner())
		require.IsType(t, &roachpb.GetRequest{}, ba.Requests[1].GetInner())

		br = ba.CreateReply()
		br.Txn = ba.Txn
		return br, nil
	})

	br, pErr = tp.SendLocked(ctx, ba)
	require.Nil(t, pErr)
	require.NotNil(t, br)

	// Add a key into the in-flight writes set.
	tp.ifWrites.insert(keyA, 10)
	require.Equal(t, 1, tp.ifWrites.len())

	// Read-only with conflicting in-flight write. The Get must chain onto the
	// in-flight write with a QueryIntent at the tracked sequence number.
	ba.Requests = nil
	ba.Add(&roachpb.GetRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}})

	mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
		require.Len(t, ba.Requests, 2)
		require.False(t, ba.AsyncConsensus)
		require.IsType(t, &roachpb.QueryIntentRequest{}, ba.Requests[0].GetInner())
		require.IsType(t, &roachpb.GetRequest{}, ba.Requests[1].GetInner())

		qiReq := ba.Requests[0].GetQueryIntent()
		require.Equal(t, keyA, qiReq.Key)
		require.Equal(t, enginepb.TxnSeq(10), qiReq.Txn.Sequence)

		// No in-flight writes have been proved yet.
		require.Equal(t, 1, tp.ifWrites.len())

		br = ba.CreateReply()
		br.Txn = ba.Txn
		br.Responses[0].GetQueryIntent().FoundIntent = true
		return br, nil
	})

	br, pErr = tp.SendLocked(ctx, ba)
	require.Nil(t, pErr)
	require.NotNil(t, br)
	// The successful QueryIntent proved (and removed) the in-flight write.
	require.Equal(t, 0, tp.ifWrites.len())
}
   407  
// TestTxnPipelinerRangedWrites tests that txnPipeliner will never perform
// ranged write operations using async consensus. It also tests that ranged
// writes will correctly chain on to existing in-flight writes.
func TestTxnPipelinerRangedWrites(t *testing.T) {
	defer leaktest.AfterTest(t)()
	ctx := context.Background()
	tp, mockSender := makeMockTxnPipeliner()

	txn := makeTxnProto()
	keyA, keyD := roachpb.Key("a"), roachpb.Key("d")

	// A batch containing a ranged write is never pipelined.
	var ba roachpb.BatchRequest
	ba.Header = roachpb.Header{Txn: &txn}
	ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}})
	ba.Add(&roachpb.DeleteRangeRequest{RequestHeader: roachpb.RequestHeader{Key: keyA, EndKey: keyD}})

	mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
		require.Len(t, ba.Requests, 2)
		require.False(t, ba.AsyncConsensus)
		require.IsType(t, &roachpb.PutRequest{}, ba.Requests[0].GetInner())
		require.IsType(t, &roachpb.DeleteRangeRequest{}, ba.Requests[1].GetInner())

		br := ba.CreateReply()
		br.Txn = ba.Txn
		return br, nil
	})

	br, pErr := tp.SendLocked(ctx, ba)
	require.Nil(t, pErr)
	require.NotNil(t, br)
	// The PutRequest was not run asynchronously, so it is not outstanding.
	require.Equal(t, 0, tp.ifWrites.len())

	// Add five keys into the in-flight writes set, one of which overlaps with
	// the Put request and two others which also overlap with the DeleteRange
	// request. Send the batch again and assert that the Put chains onto the
	// first in-flight write and the DeleteRange chains onto the second and
	// third in-flight write.
	tp.ifWrites.insert(roachpb.Key("a"), 10)
	tp.ifWrites.insert(roachpb.Key("b"), 11)
	tp.ifWrites.insert(roachpb.Key("c"), 12)
	tp.ifWrites.insert(roachpb.Key("d"), 13)
	// NOTE(review): "e" reuses sequence 13 (same as "d") — possibly a typo for
	// 14, though harmless here since neither write is chained onto below.
	tp.ifWrites.insert(roachpb.Key("e"), 13)
	require.Equal(t, 5, tp.ifWrites.len())

	mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
		// QueryIntents are inserted immediately before the request they guard:
		// one for the Put ("a"), two for the DeleteRange ("b" and "c"; "d" is
		// the exclusive end key and "e" is outside the span).
		require.Len(t, ba.Requests, 5)
		require.False(t, ba.AsyncConsensus)
		require.IsType(t, &roachpb.QueryIntentRequest{}, ba.Requests[0].GetInner())
		require.IsType(t, &roachpb.PutRequest{}, ba.Requests[1].GetInner())
		require.IsType(t, &roachpb.QueryIntentRequest{}, ba.Requests[2].GetInner())
		require.IsType(t, &roachpb.QueryIntentRequest{}, ba.Requests[3].GetInner())
		require.IsType(t, &roachpb.DeleteRangeRequest{}, ba.Requests[4].GetInner())

		qiReq1 := ba.Requests[0].GetQueryIntent()
		qiReq2 := ba.Requests[2].GetQueryIntent()
		qiReq3 := ba.Requests[3].GetQueryIntent()
		require.Equal(t, roachpb.Key("a"), qiReq1.Key)
		require.Equal(t, roachpb.Key("b"), qiReq2.Key)
		require.Equal(t, roachpb.Key("c"), qiReq3.Key)
		require.Equal(t, txn.ID, qiReq1.Txn.ID)
		require.Equal(t, txn.ID, qiReq2.Txn.ID)
		require.Equal(t, txn.ID, qiReq3.Txn.ID)
		require.Equal(t, enginepb.TxnSeq(10), qiReq1.Txn.Sequence)
		require.Equal(t, enginepb.TxnSeq(11), qiReq2.Txn.Sequence)
		require.Equal(t, enginepb.TxnSeq(12), qiReq3.Txn.Sequence)

		// No in-flight writes have been proved yet.
		require.Equal(t, 5, tp.ifWrites.len())

		br = ba.CreateReply()
		br.Txn = ba.Txn
		br.Responses[0].GetQueryIntent().FoundIntent = true
		br.Responses[2].GetQueryIntent().FoundIntent = true
		br.Responses[3].GetQueryIntent().FoundIntent = true
		return br, nil
	})

	br, pErr = tp.SendLocked(ctx, ba)
	require.Nil(t, pErr)
	require.NotNil(t, br)
	// "a", "b", and "c" were proved; "d" and "e" remain in-flight.
	require.Equal(t, 2, tp.ifWrites.len())
}
   491  
// TestTxnPipelinerNonTransactionalRequests tests that non-transaction requests
// cause the txnPipeliner to stall its entire pipeline.
func TestTxnPipelinerNonTransactionalRequests(t *testing.T) {
	defer leaktest.AfterTest(t)()
	ctx := context.Background()
	tp, mockSender := makeMockTxnPipeliner()

	txn := makeTxnProto()
	keyA, keyC := roachpb.Key("a"), roachpb.Key("c")

	// Two point writes, both pipelined with async consensus.
	var ba roachpb.BatchRequest
	ba.Header = roachpb.Header{Txn: &txn}
	ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}})
	ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyC}})

	mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
		require.Len(t, ba.Requests, 2)
		require.True(t, ba.AsyncConsensus)
		require.IsType(t, &roachpb.PutRequest{}, ba.Requests[0].GetInner())
		require.IsType(t, &roachpb.PutRequest{}, ba.Requests[1].GetInner())

		br := ba.CreateReply()
		br.Txn = ba.Txn
		return br, nil
	})

	br, pErr := tp.SendLocked(ctx, ba)
	require.Nil(t, pErr)
	require.NotNil(t, br)
	require.Equal(t, 2, tp.ifWrites.len())

	// Send a non-transactional request. Should stall pipeline and chain onto
	// all in-flight writes, even if its header doesn't imply any interaction.
	keyRangeDesc := roachpb.Key("rangeDesc")
	ba.Requests = nil
	ba.Add(&roachpb.SubsumeRequest{
		RequestHeader: roachpb.RequestHeader{Key: keyRangeDesc},
	})

	mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
		// Both in-flight writes are queried even though the Subsume's span
		// ("rangeDesc") does not overlap either key.
		require.Len(t, ba.Requests, 3)
		require.False(t, ba.AsyncConsensus)
		require.IsType(t, &roachpb.QueryIntentRequest{}, ba.Requests[0].GetInner())
		require.IsType(t, &roachpb.QueryIntentRequest{}, ba.Requests[1].GetInner())
		require.IsType(t, &roachpb.SubsumeRequest{}, ba.Requests[2].GetInner())

		qiReq1 := ba.Requests[0].GetQueryIntent()
		qiReq2 := ba.Requests[1].GetQueryIntent()
		require.Equal(t, keyA, qiReq1.Key)
		require.Equal(t, keyC, qiReq2.Key)

		br = ba.CreateReply()
		br.Txn = ba.Txn
		br.Responses[0].GetQueryIntent().FoundIntent = true
		br.Responses[1].GetQueryIntent().FoundIntent = true
		return br, nil
	})

	br, pErr = tp.SendLocked(ctx, ba)
	require.Nil(t, pErr)
	require.NotNil(t, br)
	require.Equal(t, 0, tp.ifWrites.len())
}
   555  
// TestTxnPipelinerManyWrites tests that a txnPipeliner behaves correctly even
// when its in-flight write tree grows to a very large size.
func TestTxnPipelinerManyWrites(t *testing.T) {
	defer leaktest.AfterTest(t)()
	ctx := context.Background()
	tp, mockSender := makeMockTxnPipeliner()

	// Disable write_pipelining_max_outstanding_size,
	// write_pipelining_max_batch_size, and max_intents_bytes limits.
	pipelinedWritesMaxInFlightSize.Override(&tp.st.SV, math.MaxInt64)
	pipelinedWritesMaxBatchSize.Override(&tp.st.SV, 0)
	trackedWritesMaxSize.Override(&tp.st.SV, math.MaxInt64)

	// Key i is the (i+1)-character prefix of one long string, so keys sort in
	// index order; sequences are 1-based.
	const writes = 2048
	keyBuf := roachpb.Key(strings.Repeat("a", writes+1))
	makeKey := func(i int) roachpb.Key { return keyBuf[:i+1] }
	makeSeq := func(i int) enginepb.TxnSeq { return enginepb.TxnSeq(i) + 1 }

	txn := makeTxnProto()
	var ba roachpb.BatchRequest
	ba.Header = roachpb.Header{Txn: &txn}
	for i := 0; i < writes; i++ {
		putArgs := roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: makeKey(i)}}
		putArgs.Sequence = makeSeq(i)
		ba.Add(&putArgs)
	}

	mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
		require.Len(t, ba.Requests, writes)
		require.True(t, ba.AsyncConsensus)
		for i := 0; i < writes; i++ {
			require.IsType(t, &roachpb.PutRequest{}, ba.Requests[i].GetInner())
		}

		br := ba.CreateReply()
		br.Txn = ba.Txn
		return br, nil
	})

	br, pErr := tp.SendLocked(ctx, ba)
	require.Nil(t, pErr)
	require.NotNil(t, br)
	require.Equal(t, writes, tp.ifWrites.len())

	// Query every other write.
	ba.Requests = nil
	for i := 0; i < writes; i++ {
		if i%2 == 0 {
			key := makeKey(i)
			ba.Add(&roachpb.GetRequest{RequestHeader: roachpb.RequestHeader{Key: key}})
		}
	}

	mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
		// writes/2 Gets, each preceded by a chained QueryIntent, for a total
		// of `writes` requests: QueryIntent at even indexes, Get at odd. The
		// pair at indexes (i, i+1) covers key makeKey(i) for even i.
		require.Len(t, ba.Requests, writes)
		require.False(t, ba.AsyncConsensus)
		for i := 0; i < writes; i++ {
			if i%2 == 0 {
				key := makeKey(i)
				require.IsType(t, &roachpb.QueryIntentRequest{}, ba.Requests[i].GetInner())
				require.IsType(t, &roachpb.GetRequest{}, ba.Requests[i+1].GetInner())

				qiReq := ba.Requests[i].GetQueryIntent()
				require.Equal(t, key, qiReq.Key)
				require.Equal(t, txn.ID, qiReq.Txn.ID)
				require.Equal(t, makeSeq(i), qiReq.Txn.Sequence)

				getReq := ba.Requests[i+1].GetGet()
				require.Equal(t, key, getReq.Key)
			}
		}

		br = ba.CreateReply()
		br.Txn = ba.Txn
		for i := 0; i < writes; i++ {
			if i%2 == 0 {
				br.Responses[i].GetQueryIntent().FoundIntent = true
			}
		}
		return br, nil
	})

	br, pErr = tp.SendLocked(ctx, ba)
	require.Nil(t, pErr)
	require.NotNil(t, br)
	require.Equal(t, writes/2, tp.ifWrites.len())

	// Make sure the correct writes are still in-flight.
	expIdx := 1
	tp.ifWrites.ascend(func(w *inFlightWrite) {
		require.Equal(t, makeKey(expIdx), w.Key)
		require.Equal(t, makeSeq(expIdx), w.Sequence)
		expIdx += 2
	})
}
   651  
// TestTxnPipelinerTransactionAbort tests that a txnPipeliner allows an aborting
// EndTxnRequest to proceed without attempting to prove all in-flight writes. It
// also tests that the interceptor attaches lock spans to these EndTxnRequests.
func TestTxnPipelinerTransactionAbort(t *testing.T) {
	defer leaktest.AfterTest(t)()
	ctx := context.Background()
	tp, mockSender := makeMockTxnPipeliner()

	txn := makeTxnProto()
	keyA := roachpb.Key("a")

	// Pipeline a single write so there is something in-flight to abandon.
	var ba roachpb.BatchRequest
	ba.Header = roachpb.Header{Txn: &txn}
	putArgs := roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}}
	putArgs.Sequence = 1
	ba.Add(&putArgs)

	mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
		require.Len(t, ba.Requests, 1)
		require.True(t, ba.AsyncConsensus)
		require.IsType(t, &roachpb.PutRequest{}, ba.Requests[0].GetInner())

		br := ba.CreateReply()
		br.Txn = ba.Txn
		return br, nil
	})

	br, pErr := tp.SendLocked(ctx, ba)
	require.Nil(t, pErr)
	require.NotNil(t, br)
	require.Equal(t, 1, tp.ifWrites.len())

	// Send an EndTxn request with commit=false. Should NOT attempt to prove all
	// in-flight writes because its attempting to abort the txn anyway. Should
	// NOT use async consensus.
	//
	// We'll unrealistically return a PENDING transaction, which won't allow the
	// txnPipeliner to clean up.
	ba.Requests = nil
	etArgs := roachpb.EndTxnRequest{Commit: false}
	etArgs.Sequence = 2
	ba.Add(&etArgs)

	mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
		// No QueryIntent requests are prepended; the EndTxn goes alone.
		require.Len(t, ba.Requests, 1)
		require.False(t, ba.AsyncConsensus)
		require.IsType(t, &roachpb.EndTxnRequest{}, ba.Requests[0].GetInner())

		// The unproven write travels in InFlightWrites, not LockSpans.
		etReq := ba.Requests[0].GetEndTxn()
		require.Len(t, etReq.LockSpans, 0)
		require.Equal(t, []roachpb.SequencedWrite{{Key: keyA, Sequence: 1}}, etReq.InFlightWrites)

		br = ba.CreateReply()
		br.Txn = ba.Txn
		br.Txn.Status = roachpb.PENDING // keep at PENDING
		return br, nil
	})

	br, pErr = tp.SendLocked(ctx, ba)
	require.Nil(t, pErr)
	require.NotNil(t, br)
	require.Equal(t, 1, tp.ifWrites.len()) // nothing proven

	// Send EndTxn request with commit=false again. Same deal. This time, return
	// ABORTED transaction. This will allow the txnPipeliner to remove all
	// in-flight writes because they are now uncommittable.
	ba.Requests = nil
	etArgs = roachpb.EndTxnRequest{Commit: false}
	etArgs.Sequence = 2
	ba.Add(&etArgs)

	mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
		require.Len(t, ba.Requests, 1)
		require.False(t, ba.AsyncConsensus)
		require.IsType(t, &roachpb.EndTxnRequest{}, ba.Requests[0].GetInner())

		etReq := ba.Requests[0].GetEndTxn()
		require.Len(t, etReq.LockSpans, 0)
		require.Equal(t, []roachpb.SequencedWrite{{Key: keyA, Sequence: 1}}, etReq.InFlightWrites)

		br = ba.CreateReply()
		br.Txn = ba.Txn
		br.Txn.Status = roachpb.ABORTED
		return br, nil
	})

	br, pErr = tp.SendLocked(ctx, ba)
	require.Nil(t, pErr)
	require.NotNil(t, br)
	// NOTE(review): the comment above says the ABORTED status lets the
	// pipeliner remove all in-flight writes, yet this assertion still expects
	// one tracked write — confirm which of the two reflects intended behavior.
	require.Equal(t, 1, tp.ifWrites.len()) // nothing proven
}
   743  
   744  // TestTxnPipelinerEpochIncrement tests that a txnPipeliner's in-flight write
   745  // set is reset on an epoch increment and that all writes in this set are moved
   746  // to the lock footprint so they will be removed when the transaction finishes.
   747  func TestTxnPipelinerEpochIncrement(t *testing.T) {
   748  	defer leaktest.AfterTest(t)()
   749  	tp, _ := makeMockTxnPipeliner()
   750  
   751  	tp.ifWrites.insert(roachpb.Key("b"), 10)
   752  	tp.ifWrites.insert(roachpb.Key("d"), 11)
   753  	require.Equal(t, 2, tp.ifWrites.len())
   754  	require.Equal(t, 0, len(tp.lockFootprint.asSlice()))
   755  
   756  	tp.epochBumpedLocked()
   757  	require.Equal(t, 0, tp.ifWrites.len())
   758  	require.Equal(t, 2, len(tp.lockFootprint.asSlice()))
   759  }
   760  
// TestTxnPipelinerIntentMissingError tests that a txnPipeliner transforms an
// IntentMissingError into a TransactionRetryError. It also ensures that it
// fixes the errors index.
func TestTxnPipelinerIntentMissingError(t *testing.T) {
	defer leaktest.AfterTest(t)()
	ctx := context.Background()
	tp, mockSender := makeMockTxnPipeliner()

	txn := makeTxnProto()
	keyA, keyB := roachpb.Key("a"), roachpb.Key("b")
	keyC, keyD := roachpb.Key("c"), roachpb.Key("d")

	// The original batch has three requests; the DeleteRange spans [b, d) and
	// so overlaps the in-flight writes on both "b" and "c" below.
	var ba roachpb.BatchRequest
	ba.Header = roachpb.Header{Txn: &txn}
	ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}})
	ba.Add(&roachpb.DeleteRangeRequest{RequestHeader: roachpb.RequestHeader{Key: keyB, EndKey: keyD}})
	ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyD}})

	// Insert in-flight writes into the in-flight write set so that each request
	// will need to chain on with a QueryIntent.
	tp.ifWrites.insert(keyA, 1)
	tp.ifWrites.insert(keyB, 2)
	tp.ifWrites.insert(keyC, 3)
	tp.ifWrites.insert(keyD, 4)

	// Each entry maps the index of the failing request in the expanded batch
	// (with chained QueryIntents interleaved, see the layout asserted in the
	// mock below) to the index in the ORIGINAL batch that the returned error
	// should be re-attributed to.
	for errIdx, resErrIdx := range map[int32]int32{
		0: 0, // intent on key "a" missing
		2: 1, // intent on key "b" missing
		3: 1, // intent on key "c" missing
		5: 2, // intent on key "d" missing
	} {
		t.Run(fmt.Sprintf("errIdx=%d", errIdx), func(t *testing.T) {
			mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
				// Expanded layout: QI(a), Put(a), QI(b), QI(c), DelRng(b-d),
				// QI(d), Put(d) — 7 requests total.
				require.Len(t, ba.Requests, 7)
				require.False(t, ba.AsyncConsensus)
				require.IsType(t, &roachpb.QueryIntentRequest{}, ba.Requests[0].GetInner())
				require.IsType(t, &roachpb.PutRequest{}, ba.Requests[1].GetInner())
				require.IsType(t, &roachpb.QueryIntentRequest{}, ba.Requests[2].GetInner())
				require.IsType(t, &roachpb.QueryIntentRequest{}, ba.Requests[3].GetInner())
				require.IsType(t, &roachpb.DeleteRangeRequest{}, ba.Requests[4].GetInner())
				require.IsType(t, &roachpb.QueryIntentRequest{}, ba.Requests[5].GetInner())
				require.IsType(t, &roachpb.PutRequest{}, ba.Requests[6].GetInner())

				err := roachpb.NewIntentMissingError(nil /* key */, nil /* intent */)
				pErr := roachpb.NewErrorWithTxn(err, &txn)
				pErr.SetErrorIndex(errIdx)
				return nil, pErr
			})

			// The pipeliner should surface a TransactionRetryError with the
			// RETRY_ASYNC_WRITE_FAILURE reason and a remapped error index.
			br, pErr := tp.SendLocked(ctx, ba)
			require.Nil(t, br)
			require.NotNil(t, pErr)
			require.Equal(t, &txn, pErr.GetTxn())
			require.Equal(t, resErrIdx, pErr.Index.Index)
			require.IsType(t, &roachpb.TransactionRetryError{}, pErr.GetDetail())
			require.Equal(t, roachpb.RETRY_ASYNC_WRITE_FAILURE, pErr.GetDetail().(*roachpb.TransactionRetryError).Reason)
		})
	}
}
   820  
// TestTxnPipelinerEnableDisableMixTxn tests that the txnPipeliner behaves
// correctly if pipelining is enabled or disabled midway through a transaction.
func TestTxnPipelinerEnableDisableMixTxn(t *testing.T) {
	defer leaktest.AfterTest(t)()
	ctx := context.Background()
	tp, mockSender := makeMockTxnPipeliner()

	// Start with pipelining disabled. Should NOT use async consensus.
	pipelinedWritesEnabled.Override(&tp.st.SV, false)

	txn := makeTxnProto()
	keyA, keyC := roachpb.Key("a"), roachpb.Key("c")

	var ba roachpb.BatchRequest
	ba.Header = roachpb.Header{Txn: &txn}
	putArgs := roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}}
	putArgs.Sequence = 1
	ba.Add(&putArgs)

	mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
		require.Len(t, ba.Requests, 1)
		require.False(t, ba.AsyncConsensus)
		require.IsType(t, &roachpb.PutRequest{}, ba.Requests[0].GetInner())

		br := ba.CreateReply()
		br.Txn = ba.Txn
		return br, nil
	})

	br, pErr := tp.SendLocked(ctx, ba)
	require.Nil(t, pErr)
	require.NotNil(t, br)
	// The synchronous write completed, so it is never tracked as in-flight.
	require.Equal(t, 0, tp.ifWrites.len())

	// Enable pipelining. Should use async consensus.
	pipelinedWritesEnabled.Override(&tp.st.SV, true)

	ba.Requests = nil
	putArgs2 := roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}}
	putArgs2.Sequence = 2
	ba.Add(&putArgs2)
	putArgs3 := roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyC}}
	putArgs3.Sequence = 3
	ba.Add(&putArgs3)

	mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
		require.Len(t, ba.Requests, 2)
		require.True(t, ba.AsyncConsensus)
		require.IsType(t, &roachpb.PutRequest{}, ba.Requests[0].GetInner())
		require.IsType(t, &roachpb.PutRequest{}, ba.Requests[1].GetInner())

		br = ba.CreateReply()
		br.Txn = ba.Txn
		return br, nil
	})

	br, pErr = tp.SendLocked(ctx, ba)
	require.Nil(t, pErr)
	require.NotNil(t, br)
	// Both pipelined writes are now tracked as in-flight.
	require.Equal(t, 2, tp.ifWrites.len())

	// Disable pipelining again. Should NOT use async consensus but should still
	// make sure to chain on to any overlapping in-flight writes.
	pipelinedWritesEnabled.Override(&tp.st.SV, false)

	ba.Requests = nil
	putArgs4 := roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}}
	putArgs4.Sequence = 4
	ba.Add(&putArgs4)

	mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
		require.Len(t, ba.Requests, 2)
		require.False(t, ba.AsyncConsensus)
		require.IsType(t, &roachpb.QueryIntentRequest{}, ba.Requests[0].GetInner())
		require.IsType(t, &roachpb.PutRequest{}, ba.Requests[1].GetInner())

		// The QueryIntent must target the in-flight write to keyA at the
		// sequence it was originally written with (2), not the new seq (4).
		qiReq := ba.Requests[0].GetQueryIntent()
		require.Equal(t, keyA, qiReq.Key)
		require.Equal(t, enginepb.TxnSeq(2), qiReq.Txn.Sequence)

		br = ba.CreateReply()
		br.Txn = ba.Txn
		br.Responses[0].GetQueryIntent().FoundIntent = true
		return br, nil
	})

	br, pErr = tp.SendLocked(ctx, ba)
	require.Nil(t, pErr)
	require.NotNil(t, br)
	// keyA's write was proven; only keyC's write remains in-flight.
	require.Equal(t, 1, tp.ifWrites.len())

	// Commit the txn. Again with pipelining disabled. Again, in-flight writes
	// should be proven first.
	ba.Requests = nil
	etArgs := roachpb.EndTxnRequest{Commit: true}
	etArgs.Sequence = 5
	ba.Add(&etArgs)

	mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
		require.Len(t, ba.Requests, 2)
		require.False(t, ba.AsyncConsensus)
		require.IsType(t, &roachpb.QueryIntentRequest{}, ba.Requests[0].GetInner())
		require.IsType(t, &roachpb.EndTxnRequest{}, ba.Requests[1].GetInner())

		qiReq := ba.Requests[0].GetQueryIntent()
		require.Equal(t, keyC, qiReq.Key)
		require.Equal(t, enginepb.TxnSeq(3), qiReq.Txn.Sequence)

		// keyA's proven write appears as a lock span; keyC's write is still
		// being proven, so it rides along in InFlightWrites.
		etReq := ba.Requests[1].GetEndTxn()
		require.Equal(t, []roachpb.Span{{Key: keyA}}, etReq.LockSpans)
		require.Equal(t, []roachpb.SequencedWrite{{Key: keyC, Sequence: 3}}, etReq.InFlightWrites)

		br = ba.CreateReply()
		br.Txn = ba.Txn
		br.Txn.Status = roachpb.COMMITTED
		br.Responses[0].GetQueryIntent().FoundIntent = true
		return br, nil
	})

	br, pErr = tp.SendLocked(ctx, ba)
	require.Nil(t, pErr)
	require.NotNil(t, br)
	require.Equal(t, 0, tp.ifWrites.len())
}
   945  
// TestTxnPipelinerMaxInFlightSize tests that batches are not pipelined if
// doing so would push the memory used to track in-flight writes over the
// limit allowed by the kv.transaction.write_pipelining_max_outstanding_size
// setting.
func TestTxnPipelinerMaxInFlightSize(t *testing.T) {
	defer leaktest.AfterTest(t)()
	ctx := context.Background()
	tp, mockSender := makeMockTxnPipeliner()

	// Set maxInFlightSize limit to 3 bytes.
	pipelinedWritesMaxInFlightSize.Override(&tp.st.SV, 3)

	// Each single-byte key below contributes 1 byte to the tracked in-flight
	// size (see the byteSize assertions throughout).
	txn := makeTxnProto()
	keyA, keyB := roachpb.Key("a"), roachpb.Key("b")
	keyC, keyD := roachpb.Key("c"), roachpb.Key("d")

	// Send a batch that would exceed the limit.
	var ba roachpb.BatchRequest
	ba.Header = roachpb.Header{Txn: &txn}
	ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}})
	ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyB}})
	ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyC}})
	ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyD}})

	mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
		require.Len(t, ba.Requests, 4)
		require.False(t, ba.AsyncConsensus)

		br := ba.CreateReply()
		br.Txn = ba.Txn
		return br, nil
	})

	br, pErr := tp.SendLocked(ctx, ba)
	require.Nil(t, pErr)
	require.NotNil(t, br)
	// Not pipelined, so nothing is tracked as in-flight.
	require.Equal(t, int64(0), tp.ifWrites.byteSize())

	// Send a batch that is equal to the limit.
	ba.Requests = nil
	ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}})
	ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyB}})
	ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyC}})

	mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
		require.Len(t, ba.Requests, 3)
		require.True(t, ba.AsyncConsensus)

		br = ba.CreateReply()
		br.Txn = ba.Txn
		return br, nil
	})

	br, pErr = tp.SendLocked(ctx, ba)
	require.Nil(t, pErr)
	require.NotNil(t, br)
	// Writes to "a", "b", and "c" are now in-flight: exactly at the limit.
	require.Equal(t, int64(3), tp.ifWrites.byteSize())

	// Send a batch that would be under the limit if we weren't already at it.
	ba.Requests = nil
	ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyD}})

	mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
		require.Len(t, ba.Requests, 1)
		require.False(t, ba.AsyncConsensus)

		br = ba.CreateReply()
		br.Txn = ba.Txn
		return br, nil
	})

	br, pErr = tp.SendLocked(ctx, ba)
	require.Nil(t, pErr)
	require.NotNil(t, br)
	// The keyD write was not pipelined; size is unchanged.
	require.Equal(t, int64(3), tp.ifWrites.byteSize())

	// Send a batch that proves two of the in-flight writes.
	ba.Requests = nil
	ba.Add(&roachpb.GetRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}})
	ba.Add(&roachpb.GetRequest{RequestHeader: roachpb.RequestHeader{Key: keyB}})

	mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
		// Each Get chains on a QueryIntent for its overlapping write.
		require.Len(t, ba.Requests, 4)
		require.False(t, ba.AsyncConsensus)
		require.IsType(t, &roachpb.QueryIntentRequest{}, ba.Requests[0].GetInner())
		require.IsType(t, &roachpb.GetRequest{}, ba.Requests[1].GetInner())
		require.IsType(t, &roachpb.QueryIntentRequest{}, ba.Requests[2].GetInner())
		require.IsType(t, &roachpb.GetRequest{}, ba.Requests[3].GetInner())

		br = ba.CreateReply()
		br.Txn = ba.Txn
		br.Responses[0].GetQueryIntent().FoundIntent = true
		br.Responses[2].GetQueryIntent().FoundIntent = true
		return br, nil
	})

	br, pErr = tp.SendLocked(ctx, ba)
	require.Nil(t, pErr)
	require.NotNil(t, br)
	// Only the write to "c" remains in-flight.
	require.Equal(t, int64(1), tp.ifWrites.byteSize())

	// Now that we're not up against the limit, send a batch that proves one
	// write and immediately writes it again, along with a second write.
	ba.Requests = nil
	ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyB}})
	ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyC}})

	mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
		// Only keyC overlaps an in-flight write, so only it gets a QueryIntent.
		require.Len(t, ba.Requests, 3)
		require.True(t, ba.AsyncConsensus)
		require.IsType(t, &roachpb.PutRequest{}, ba.Requests[0].GetInner())
		require.IsType(t, &roachpb.QueryIntentRequest{}, ba.Requests[1].GetInner())
		require.IsType(t, &roachpb.PutRequest{}, ba.Requests[2].GetInner())

		br = ba.CreateReply()
		br.Txn = ba.Txn
		br.Responses[1].GetQueryIntent().FoundIntent = true
		return br, nil
	})

	br, pErr = tp.SendLocked(ctx, ba)
	require.Nil(t, pErr)
	require.NotNil(t, br)
	require.Equal(t, int64(2), tp.ifWrites.byteSize())

	// Send the same batch again. Even though it would prove two in-flight
	// writes while performing two others, we won't allow it to perform async
	// consensus because the estimation is conservative.
	mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
		require.Len(t, ba.Requests, 4)
		require.False(t, ba.AsyncConsensus)
		require.IsType(t, &roachpb.QueryIntentRequest{}, ba.Requests[0].GetInner())
		require.IsType(t, &roachpb.PutRequest{}, ba.Requests[1].GetInner())
		require.IsType(t, &roachpb.QueryIntentRequest{}, ba.Requests[2].GetInner())
		require.IsType(t, &roachpb.PutRequest{}, ba.Requests[3].GetInner())

		br = ba.CreateReply()
		br.Txn = ba.Txn
		br.Responses[0].GetQueryIntent().FoundIntent = true
		br.Responses[2].GetQueryIntent().FoundIntent = true
		return br, nil
	})

	br, pErr = tp.SendLocked(ctx, ba)
	require.Nil(t, pErr)
	require.NotNil(t, br)
	// Both writes were proven and the new writes were not pipelined.
	require.Equal(t, int64(0), tp.ifWrites.byteSize())

	// Increase maxInFlightSize limit to 5 bytes.
	pipelinedWritesMaxInFlightSize.Override(&tp.st.SV, 5)

	// The original batch with 4 writes should succeed.
	ba.Requests = nil
	ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}})
	ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyB}})
	ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyC}})
	ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyD}})

	mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
		require.Len(t, ba.Requests, 4)
		require.True(t, ba.AsyncConsensus)

		br = ba.CreateReply()
		br.Txn = ba.Txn
		return br, nil
	})

	br, pErr = tp.SendLocked(ctx, ba)
	require.Nil(t, pErr)
	require.NotNil(t, br)
	require.Equal(t, int64(4), tp.ifWrites.byteSize())

	// Bump the txn epoch. The in-flight bytes counter should reset.
	tp.epochBumpedLocked()
	require.Equal(t, int64(0), tp.ifWrites.byteSize())
}
  1122  
// TestTxnPipelinerMaxBatchSize tests that batches that contain more requests
// than allowed by the kv.transaction.write_pipelining_max_batch_size setting
// will not be pipelined.
func TestTxnPipelinerMaxBatchSize(t *testing.T) {
	defer leaktest.AfterTest(t)()
	ctx := context.Background()
	tp, mockSender := makeMockTxnPipeliner()

	// Set maxBatchSize limit to 1.
	pipelinedWritesMaxBatchSize.Override(&tp.st.SV, 1)

	txn := makeTxnProto()
	keyA, keyC := roachpb.Key("a"), roachpb.Key("c")

	// Batch below limit.
	var ba roachpb.BatchRequest
	ba.Header = roachpb.Header{Txn: &txn}
	ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}})

	mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
		require.Len(t, ba.Requests, 1)
		require.True(t, ba.AsyncConsensus)
		require.IsType(t, &roachpb.PutRequest{}, ba.Requests[0].GetInner())

		br := ba.CreateReply()
		br.Txn = ba.Txn
		return br, nil
	})

	br, pErr := tp.SendLocked(ctx, ba)
	require.Nil(t, pErr)
	require.NotNil(t, br)
	// The single write was pipelined and is tracked as in-flight.
	require.Equal(t, 1, tp.ifWrites.len())

	// Batch above limit.
	ba.Requests = nil
	ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}})
	ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyC}})

	mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
		// No async consensus, but the new write to keyA still chains on a
		// QueryIntent for the earlier in-flight write to the same key.
		require.Len(t, ba.Requests, 3)
		require.False(t, ba.AsyncConsensus)
		require.IsType(t, &roachpb.QueryIntentRequest{}, ba.Requests[0].GetInner())
		require.IsType(t, &roachpb.PutRequest{}, ba.Requests[1].GetInner())
		require.IsType(t, &roachpb.PutRequest{}, ba.Requests[2].GetInner())

		br = ba.CreateReply()
		br.Txn = ba.Txn
		br.Responses[0].GetQueryIntent().FoundIntent = true
		return br, nil
	})

	br, pErr = tp.SendLocked(ctx, ba)
	require.Nil(t, pErr)
	require.NotNil(t, br)
	// The in-flight write was proven and nothing new was pipelined.
	require.Equal(t, 0, tp.ifWrites.len())

	// Increase maxBatchSize limit to 2.
	pipelinedWritesMaxBatchSize.Override(&tp.st.SV, 2)

	// Same batch now below limit.
	mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
		require.Len(t, ba.Requests, 2)
		require.True(t, ba.AsyncConsensus)
		require.IsType(t, &roachpb.PutRequest{}, ba.Requests[0].GetInner())
		require.IsType(t, &roachpb.PutRequest{}, ba.Requests[1].GetInner())

		br = ba.CreateReply()
		br.Txn = ba.Txn
		return br, nil
	})

	br, pErr = tp.SendLocked(ctx, ba)
	require.Nil(t, pErr)
	require.NotNil(t, br)
	require.Equal(t, 2, tp.ifWrites.len())
}
  1200  
  1201  // TestTxnPipelinerRecordsLocksOnFailure tests that even when a request returns
  1202  // with an ABORTED transaction status or an error, the locks that it attempted
  1203  // to acquire are added to the lock footprint.
  1204  func TestTxnPipelinerRecordsLocksOnFailure(t *testing.T) {
  1205  	defer leaktest.AfterTest(t)()
  1206  	ctx := context.Background()
  1207  	tp, mockSender := makeMockTxnPipeliner()
  1208  
  1209  	txn := makeTxnProto()
  1210  	keyA, keyB, keyC := roachpb.Key("a"), roachpb.Key("b"), roachpb.Key("c")
  1211  	keyD, keyE, keyF := roachpb.Key("d"), roachpb.Key("e"), roachpb.Key("f")
  1212  
  1213  	// Return an error for a point write, a range write, and a range locking
  1214  	// read.
  1215  	var ba roachpb.BatchRequest
  1216  	ba.Header = roachpb.Header{Txn: &txn}
  1217  	ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}})
  1218  	ba.Add(&roachpb.DeleteRangeRequest{RequestHeader: roachpb.RequestHeader{Key: keyB, EndKey: keyB.Next()}})
  1219  	ba.Add(&roachpb.ScanRequest{RequestHeader: roachpb.RequestHeader{Key: keyC, EndKey: keyC.Next()}, KeyLocking: lock.Exclusive})
  1220  
  1221  	mockPErr := roachpb.NewErrorf("boom")
  1222  	mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
  1223  		require.Len(t, ba.Requests, 3)
  1224  		require.False(t, ba.AsyncConsensus)
  1225  		require.IsType(t, &roachpb.PutRequest{}, ba.Requests[0].GetInner())
  1226  		require.IsType(t, &roachpb.DeleteRangeRequest{}, ba.Requests[1].GetInner())
  1227  		require.IsType(t, &roachpb.ScanRequest{}, ba.Requests[2].GetInner())
  1228  
  1229  		return nil, mockPErr
  1230  	})
  1231  
  1232  	br, pErr := tp.SendLocked(ctx, ba)
  1233  	require.Nil(t, br)
  1234  	require.Equal(t, mockPErr, pErr)
  1235  	require.Equal(t, 0, tp.ifWrites.len())
  1236  
  1237  	var expLocks []roachpb.Span
  1238  	expLocks = append(expLocks, roachpb.Span{Key: keyA})
  1239  	expLocks = append(expLocks, roachpb.Span{Key: keyB, EndKey: keyB.Next()})
  1240  	expLocks = append(expLocks, roachpb.Span{Key: keyC, EndKey: keyC.Next()})
  1241  	require.Equal(t, expLocks, tp.lockFootprint.asSlice())
  1242  
  1243  	// Return an ABORTED transaction record for a point write, a range write,
  1244  	// and a range locking read.
  1245  	ba.Requests = nil
  1246  	ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyD}})
  1247  	ba.Add(&roachpb.DeleteRangeRequest{RequestHeader: roachpb.RequestHeader{Key: keyE, EndKey: keyE.Next()}})
  1248  	ba.Add(&roachpb.ScanRequest{RequestHeader: roachpb.RequestHeader{Key: keyF, EndKey: keyF.Next()}, KeyLocking: lock.Exclusive})
  1249  
  1250  	mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
  1251  		require.Len(t, ba.Requests, 3)
  1252  		require.False(t, ba.AsyncConsensus)
  1253  		require.IsType(t, &roachpb.PutRequest{}, ba.Requests[0].GetInner())
  1254  		require.IsType(t, &roachpb.DeleteRangeRequest{}, ba.Requests[1].GetInner())
  1255  		require.IsType(t, &roachpb.ScanRequest{}, ba.Requests[2].GetInner())
  1256  
  1257  		br = ba.CreateReply()
  1258  		br.Txn = ba.Txn
  1259  		return br, nil
  1260  	})
  1261  
  1262  	br, pErr = tp.SendLocked(ctx, ba)
  1263  	require.Nil(t, pErr)
  1264  	require.NotNil(t, br)
  1265  	require.Equal(t, 0, tp.ifWrites.len())
  1266  
  1267  	expLocks = append(expLocks, roachpb.Span{Key: keyD})
  1268  	expLocks = append(expLocks, roachpb.Span{Key: keyE, EndKey: keyE.Next()})
  1269  	expLocks = append(expLocks, roachpb.Span{Key: keyF, EndKey: keyF.Next()})
  1270  	require.Equal(t, expLocks, tp.lockFootprint.asSlice())
  1271  
  1272  	// The lock spans are all attached to the EndTxn request when one is sent.
  1273  	ba.Requests = nil
  1274  	ba.Add(&roachpb.EndTxnRequest{Commit: false})
  1275  
  1276  	mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
  1277  		require.Len(t, ba.Requests, 1)
  1278  		require.IsType(t, &roachpb.EndTxnRequest{}, ba.Requests[0].GetInner())
  1279  
  1280  		etReq := ba.Requests[0].GetEndTxn()
  1281  		require.Equal(t, expLocks, etReq.LockSpans)
  1282  		require.Len(t, etReq.InFlightWrites, 0)
  1283  
  1284  		br = ba.CreateReply()
  1285  		br.Txn = ba.Txn
  1286  		br.Txn.Status = roachpb.ABORTED
  1287  		return br, nil
  1288  	})
  1289  
  1290  	br, pErr = tp.SendLocked(ctx, ba)
  1291  	require.Nil(t, pErr)
  1292  	require.NotNil(t, br)
  1293  }
  1294  
// Test that the pipeliner knows how to save and restore its state.
func TestTxnPipelinerSavepoints(t *testing.T) {
	defer leaktest.AfterTest(t)()
	ctx := context.Background()
	tp, mockSender := makeMockTxnPipeliner()

	// Snapshot the pipeliner's (empty) state before any writes.
	initialSavepoint := savepoint{}
	tp.createSavepointLocked(ctx, &initialSavepoint)

	tp.ifWrites.insert(roachpb.Key("a"), 10)
	tp.ifWrites.insert(roachpb.Key("b"), 11)
	tp.ifWrites.insert(roachpb.Key("c"), 12)
	require.Equal(t, 3, tp.ifWrites.len())

	s := savepoint{seqNum: enginepb.TxnSeq(12), active: true}
	tp.createSavepointLocked(ctx, &s)

	// Some more writes after the savepoint. One of them is on key "c" that is
	// part of the savepoint too, so we'll check that, upon rollback, the savepoint is
	// updated to remove the lower-seq-num write to "c" that it was tracking as in-flight.
	tp.ifWrites.insert(roachpb.Key("c"), 13)
	tp.ifWrites.insert(roachpb.Key("d"), 14)
	require.Empty(t, tp.lockFootprint.asSlice())

	// Now verify one of the writes. When we'll rollback to the savepoint below,
	// we'll check that the verified write stayed verified.
	txn := makeTxnProto()
	var ba roachpb.BatchRequest
	ba.Header = roachpb.Header{Txn: &txn}
	ba.Add(&roachpb.GetRequest{RequestHeader: roachpb.RequestHeader{Key: roachpb.Key("a")}})
	mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
		require.Len(t, ba.Requests, 2)
		require.False(t, ba.AsyncConsensus)
		require.IsType(t, &roachpb.QueryIntentRequest{}, ba.Requests[0].GetInner())
		require.IsType(t, &roachpb.GetRequest{}, ba.Requests[1].GetInner())

		// The Get chains on the in-flight write to "a" at its original seq.
		qiReq := ba.Requests[0].GetInner().(*roachpb.QueryIntentRequest)
		require.Equal(t, roachpb.Key("a"), qiReq.Key)
		require.Equal(t, enginepb.TxnSeq(10), qiReq.Txn.Sequence)

		br := ba.CreateReply()
		br.Txn = ba.Txn
		br.Responses[0].GetQueryIntent().FoundIntent = true
		return br, nil
	})
	br, pErr := tp.SendLocked(ctx, ba)
	require.Nil(t, pErr)
	require.NotNil(t, br)
	require.Equal(t, []roachpb.Span{{Key: roachpb.Key("a")}}, tp.lockFootprint.asSlice())
	require.Equal(t, 3, tp.ifWrites.len()) // We've verified one out of 4 writes.

	// Now restore the savepoint and check that the in-flight write state has been restored
	// and all rolled-back writes were moved to the lock footprint.
	tp.rollbackToSavepointLocked(ctx, s)

	// Check that the tracked inflight writes were updated correctly. The key that
	// had been verified ("a") should have been taken out of the savepoint. Same
	// for the "c", for which the pipeliner is now tracking a
	// higher-sequence-number (which implies that it must have verified the lower
	// sequence number write).
	var ifWrites []inFlightWrite
	tp.ifWrites.ascend(func(w *inFlightWrite) {
		ifWrites = append(ifWrites, *w)
	})
	require.Equal(t,
		[]inFlightWrite{
			{roachpb.SequencedWrite{Key: roachpb.Key("b"), Sequence: 11}},
		},
		ifWrites)

	// Check that the footprint was updated correctly. In addition to the "a"
	// which it had before, it will also have "d" because it's not part of the
	// savepoint. It will also have "c" since that's not an in-flight write any
	// more (see above).
	require.Equal(t,
		[]roachpb.Span{
			{Key: roachpb.Key("a")},
			{Key: roachpb.Key("c")},
			{Key: roachpb.Key("d")},
		},
		tp.lockFootprint.asSlice())

	// Now rollback to the initial savepoint and check that all in-flight writes are gone.
	tp.rollbackToSavepointLocked(ctx, initialSavepoint)
	require.Empty(t, tp.ifWrites.len())
}