github.com/cockroachdb/cockroach@v20.2.0-alpha.1+incompatible/pkg/kv/kvserver/concurrency/lock_table_test.go

     1  // Copyright 2020 The Cockroach Authors.
     2  //
     3  // Use of this software is governed by the Business Source License
     4  // included in the file licenses/BSL.txt.
     5  //
     6  // As of the Change Date specified in that file, in accordance with
     7  // the Business Source License, use of this software will be governed
     8  // by the Apache License, Version 2.0, included in the file
     9  // licenses/APL.txt.
    10  
    11  package concurrency
    12  
    13  import (
    14  	"context"
    15  	"fmt"
    16  	"runtime"
    17  	"strconv"
    18  	"strings"
    19  	"sync/atomic"
    20  	"testing"
    21  	"time"
    22  
    23  	"github.com/cockroachdb/cockroach/pkg/kv/kvserver/concurrency/lock"
    24  	"github.com/cockroachdb/cockroach/pkg/kv/kvserver/spanlatch"
    25  	"github.com/cockroachdb/cockroach/pkg/kv/kvserver/spanset"
    26  	"github.com/cockroachdb/cockroach/pkg/roachpb"
    27  	"github.com/cockroachdb/cockroach/pkg/storage/enginepb"
    28  	"github.com/cockroachdb/cockroach/pkg/util/hlc"
    29  	"github.com/cockroachdb/cockroach/pkg/util/leaktest"
    30  	"github.com/cockroachdb/cockroach/pkg/util/log"
    31  	"github.com/cockroachdb/cockroach/pkg/util/syncutil"
    32  	"github.com/cockroachdb/cockroach/pkg/util/timeutil"
    33  	"github.com/cockroachdb/cockroach/pkg/util/uint128"
    34  	"github.com/cockroachdb/cockroach/pkg/util/uuid"
    35  	"github.com/cockroachdb/datadriven"
    36  	"github.com/cockroachdb/errors"
    37  	"golang.org/x/exp/rand"
    38  	"golang.org/x/sync/errgroup"
    39  )
    40  
    41  /*
    42  Test needs to handle caller constraints with respect to latches being held.
    43  The datadriven test uses the following format (an example follows the comment):
    44  
    45  new-lock-table maxlocks=<int>
    46  ----
    47  
    48   Creates a lockTable. The lockTable is initially enabled.
    49  
    50  new-txn txn=<name> ts=<int>[,<int>] epoch=<int> [seq=<int>]
    51  ----
    52  
    53   Creates a TxnMeta.
    54  
    55  new-request r=<name> txn=<name>|none ts=<int>[,<int>] spans=r|w@<start>[,<end>]+...
    56  ----
    57  
    58   Creates a Request.
    59  
    60  scan r=<name>
    61  ----
    62  <error string>|start-waiting: <bool>
    63  
    64   Calls lockTable.ScanAndEnqueue. If the request has an existing guard, uses it.
    65   If a guard is returned, stores it for later use.
    66  
    67  acquire r=<name> k=<key> durability=r|u
    68  ----
    69  <error string>
    70  
    71   Acquires a lock for the request, using the existing guard for that request.
    72  
    73  release txn=<name> span=<start>[,<end>]
    74  ----
    75  <error string>
    76  
    77   Releases locks for the named transaction.
    78  
    79  update txn=<name> ts=<int>[,<int>] epoch=<int> span=<start>[,<end>] [ignored-seqs=<int>[-<int>][,<int>[-<int>]]]
    80  ----
    81  <error string>
    82  
    83   Updates locks for the named transaction.
    84  
    85  add-discovered r=<name> k=<key> txn=<name>
    86  ----
    87  <error string>
    88  
    89   Adds a lock discovered by the named request.
    90  
    91  dequeue r=<name>
    92  ----
    93  <error string>
    94  
    95   Calls lockTable.Dequeue for the named request. The request and guard are
    96   discarded after this.
    97  
    98  guard-state r=<name>
    99  ----
   100  new|old: state=<state> [txn=<name> ts=<ts>]
   101  
   102   Calls lockTableGuard.NewStateChan in a non-blocking manner, followed by
   103   CurState.
   104  
   105  should-wait r=<name>
   106  ----
   107  <bool>
   108  
   109   Calls lockTableGuard.ShouldWait.
   110  
   111  enable
   112  ----
   113  
   114   Calls lockTable.Enable.
   115  
   116  clear [disable]
   117  ----
   118  <state of lock table>
   119  
   120   Calls lockTable.Clear. Optionally disables the lockTable.
   121  
   122  print
   123  ----
   124  <state of lock table>
   125  
   126   Calls lockTable.String.
   127  */
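
        // For illustration only, a hypothetical testdata snippet in the format
        // described above might look as follows (a hedged sketch, not copied
        // from the actual files under testdata/lock_table; on success the
        // mutating commands echo the lock table state, per the test code below):
        //
        //   new-lock-table maxlocks=10000
        //   ----
        //
        //   new-txn txn=txn1 ts=10,1 epoch=0
        //   ----
        //
        //   new-request r=req1 txn=txn1 ts=10,1 spans=w@a
        //   ----
        //
        //   scan r=req1
        //   ----
        //   start-waiting: false
        //
        //   acquire r=req1 k=a durability=u
        //   ----
        //   <state of lock table>
        //
        //   release txn=txn1 span=a
        //   ----
        //   <state of lock table>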
   128  
   129  func TestLockTableBasic(t *testing.T) {
   130  	defer leaktest.AfterTest(t)()
   131  
   132  	datadriven.Walk(t, "testdata/lock_table", func(t *testing.T, path string) {
   133  		var lt lockTable
   134  		var txnsByName map[string]*enginepb.TxnMeta
   135  		var txnCounter uint128.Uint128
   136  		var requestsByName map[string]Request
   137  		var guardsByReqName map[string]lockTableGuard
   138  		datadriven.RunTest(t, path, func(t *testing.T, d *datadriven.TestData) string {
   139  			switch d.Cmd {
   140  			case "new-lock-table":
   141  				var maxLocks int
   142  				d.ScanArgs(t, "maxlocks", &maxLocks)
   143  				lt = &lockTableImpl{
   144  					enabled:  true,
   145  					maxLocks: int64(maxLocks),
   146  				}
   147  				txnsByName = make(map[string]*enginepb.TxnMeta)
   148  				txnCounter = uint128.FromInts(0, 0)
   149  				requestsByName = make(map[string]Request)
   150  				guardsByReqName = make(map[string]lockTableGuard)
   151  				return ""
   152  
   153  			case "new-txn":
   154  				// UUIDs for transactions are numbered from 1 by this test code, and
   155  				// lockTableImpl.String() knows about UUIDs, not transaction names.
   156  				// Assigning txn names of the form txn1, txn2, ... keeps the two in
   157  				// sync, which makes test cases easier to understand.
   158  				var txnName string
   159  				d.ScanArgs(t, "txn", &txnName)
   160  				ts := scanTimestamp(t, d)
   161  				var epoch int
   162  				d.ScanArgs(t, "epoch", &epoch)
   163  				var seq int
   164  				if d.HasArg("seq") {
   165  					d.ScanArgs(t, "seq", &seq)
   166  				}
   167  				txnMeta, ok := txnsByName[txnName]
   168  				var id uuid.UUID
   169  				if ok {
   170  					id = txnMeta.ID
   171  				} else {
   172  					id = nextUUID(&txnCounter)
   173  				}
   174  				txnsByName[txnName] = &enginepb.TxnMeta{
   175  					ID:             id,
   176  					Epoch:          enginepb.TxnEpoch(epoch),
   177  					Sequence:       enginepb.TxnSeq(seq),
   178  					WriteTimestamp: ts,
   179  				}
   180  				return ""
   181  
   182  			case "new-request":
   183  				// Seqnums for requests are numbered from 1 by lockTableImpl and
   184  				// lockTableImpl.String() does not know about request names. Assigning
   185  				// request names of the form req1, req2, ... keeps the two in sync,
   186  				// which makes test cases easier to understand.
   187  				var reqName string
   188  				d.ScanArgs(t, "r", &reqName)
   189  				if _, ok := requestsByName[reqName]; ok {
   190  					d.Fatalf(t, "duplicate request: %s", reqName)
   191  				}
   192  				var txnName string
   193  				d.ScanArgs(t, "txn", &txnName)
   194  				txnMeta, ok := txnsByName[txnName]
   195  				if !ok && txnName != "none" {
   196  					d.Fatalf(t, "unknown txn %s", txnName)
   197  				}
   198  				ts := scanTimestamp(t, d)
   199  				spans := scanSpans(t, d, ts)
   200  				req := Request{
   201  					Timestamp:  ts,
   202  					LatchSpans: spans,
   203  					LockSpans:  spans,
   204  				}
   205  				if txnMeta != nil {
   206  					// Update the transaction's timestamp, if necessary. The transaction
   207  					// may have needed to move its timestamp for any number of reasons.
   208  					txnMeta.WriteTimestamp = ts
   209  					req.Txn = &roachpb.Transaction{
   210  						TxnMeta:       *txnMeta,
   211  						ReadTimestamp: ts,
   212  					}
   213  				}
   214  				requestsByName[reqName] = req
   215  				return ""
   216  
   217  			case "scan":
   218  				var reqName string
   219  				d.ScanArgs(t, "r", &reqName)
   220  				req, ok := requestsByName[reqName]
   221  				if !ok {
   222  					d.Fatalf(t, "unknown request: %s", reqName)
   223  				}
   224  				g := guardsByReqName[reqName]
   225  				g = lt.ScanAndEnqueue(req, g)
   226  				guardsByReqName[reqName] = g
   227  				return fmt.Sprintf("start-waiting: %t", g.ShouldWait())
   228  
   229  			case "acquire":
   230  				var reqName string
   231  				d.ScanArgs(t, "r", &reqName)
   232  				req, ok := requestsByName[reqName]
   233  				if !ok {
   234  					d.Fatalf(t, "unknown request: %s", reqName)
   235  				}
   236  				var key string
   237  				d.ScanArgs(t, "k", &key)
   238  				var s string
   239  				d.ScanArgs(t, "durability", &s)
   240  				if len(s) != 1 || (s[0] != 'r' && s[0] != 'u') {
   241  					d.Fatalf(t, "incorrect durability: %s", s)
   242  				}
   243  				durability := lock.Unreplicated
   244  				if s[0] == 'r' {
   245  					durability = lock.Replicated
   246  				}
   247  				if err := lt.AcquireLock(&req.Txn.TxnMeta, roachpb.Key(key), lock.Exclusive, durability); err != nil {
   248  					return err.Error()
   249  				}
   250  				return lt.(*lockTableImpl).String()
   251  
   252  			case "release":
   253  				var txnName string
   254  				d.ScanArgs(t, "txn", &txnName)
   255  				txnMeta, ok := txnsByName[txnName]
   256  				if !ok {
   257  					d.Fatalf(t, "unknown txn %s", txnName)
   258  				}
   259  				var s string
   260  				d.ScanArgs(t, "span", &s)
   261  				span := getSpan(t, d, s)
   262  				// TODO(sbhola): also test ABORTED.
   263  				intent := &roachpb.LockUpdate{Span: span, Txn: *txnMeta, Status: roachpb.COMMITTED}
   264  				if err := lt.UpdateLocks(intent); err != nil {
   265  					return err.Error()
   266  				}
   267  				return lt.(*lockTableImpl).String()
   268  
   269  			case "update":
   270  				var txnName string
   271  				d.ScanArgs(t, "txn", &txnName)
   272  				txnMeta, ok := txnsByName[txnName]
   273  				if !ok {
   274  					d.Fatalf(t, "unknown txn %s", txnName)
   275  				}
   276  				ts := scanTimestamp(t, d)
   277  				var epoch int
   278  				d.ScanArgs(t, "epoch", &epoch)
   279  				txnMeta = &enginepb.TxnMeta{ID: txnMeta.ID, Sequence: txnMeta.Sequence}
   280  				txnMeta.Epoch = enginepb.TxnEpoch(epoch)
   281  				txnMeta.WriteTimestamp = ts
   282  				txnsByName[txnName] = txnMeta
   283  				var s string
   284  				d.ScanArgs(t, "span", &s)
   285  				span := getSpan(t, d, s)
   286  				var ignored []enginepb.IgnoredSeqNumRange
   287  				if d.HasArg("ignored-seqs") {
   288  					var seqsStr string
   289  					d.ScanArgs(t, "ignored-seqs", &seqsStr)
   290  					parts := strings.Split(seqsStr, ",")
   291  					for _, p := range parts {
   292  						pair := strings.Split(p, "-")
   293  						if len(pair) != 1 && len(pair) != 2 {
   294  							d.Fatalf(t, "error parsing %s", p)
   295  						}
   296  						startNum, err := strconv.ParseInt(pair[0], 10, 32)
   297  						if err != nil {
   298  							d.Fatalf(t, "error parsing ignored seqnums: %s", err)
   299  						}
   300  						ignoredRange := enginepb.IgnoredSeqNumRange{
   301  							Start: enginepb.TxnSeq(startNum), End: enginepb.TxnSeq(startNum)}
   302  						if len(pair) == 2 {
   303  							endNum, err := strconv.ParseInt(pair[1], 10, 32)
   304  							if err != nil {
   305  								d.Fatalf(t, "error parsing ignored seqnums: %s", err)
   306  							}
   307  							ignoredRange.End = enginepb.TxnSeq(endNum)
   308  						}
   309  						ignored = append(ignored, ignoredRange)
   310  					}
   311  				}
   312  				// TODO(sbhola): also test STAGING.
   313  				intent := &roachpb.LockUpdate{
   314  					Span: span, Txn: *txnMeta, Status: roachpb.PENDING, IgnoredSeqNums: ignored}
   315  				if err := lt.UpdateLocks(intent); err != nil {
   316  					return err.Error()
   317  				}
   318  				return lt.(*lockTableImpl).String()
   319  
   320  			case "add-discovered":
   321  				var reqName string
   322  				d.ScanArgs(t, "r", &reqName)
   323  				g := guardsByReqName[reqName]
   324  				if g == nil {
   325  					d.Fatalf(t, "unknown guard: %s", reqName)
   326  				}
   327  				var key string
   328  				d.ScanArgs(t, "k", &key)
   329  				var txnName string
   330  				d.ScanArgs(t, "txn", &txnName)
   331  				txnMeta, ok := txnsByName[txnName]
   332  				if !ok {
   333  					d.Fatalf(t, "unknown txn %s", txnName)
   334  				}
   335  				intent := roachpb.MakeIntent(txnMeta, roachpb.Key(key))
   336  				if _, err := lt.AddDiscoveredLock(&intent, g); err != nil {
   337  					return err.Error()
   338  				}
   339  				return lt.(*lockTableImpl).String()
   340  
   341  			case "dequeue":
   342  				var reqName string
   343  				d.ScanArgs(t, "r", &reqName)
   344  				g := guardsByReqName[reqName]
   345  				if g == nil {
   346  					d.Fatalf(t, "unknown guard: %s", reqName)
   347  				}
   348  				lt.Dequeue(g)
   349  				delete(guardsByReqName, reqName)
   350  				delete(requestsByName, reqName)
   351  				return lt.(*lockTableImpl).String()
   352  
   353  			case "should-wait":
   354  				var reqName string
   355  				d.ScanArgs(t, "r", &reqName)
   356  				g := guardsByReqName[reqName]
   357  				if g == nil {
   358  					d.Fatalf(t, "unknown guard: %s", reqName)
   359  				}
   360  				return fmt.Sprintf("%t", g.ShouldWait())
   361  
   362  			case "guard-state":
   363  				var reqName string
   364  				d.ScanArgs(t, "r", &reqName)
   365  				g := guardsByReqName[reqName]
   366  				if g == nil {
   367  					d.Fatalf(t, "unknown guard: %s", reqName)
   368  				}
   369  				var str string
   370  				select {
   371  				case <-g.NewStateChan():
   372  					str = "new: "
   373  				default:
   374  					str = "old: "
   375  				}
   376  				state := g.CurState()
   377  				var typeStr string
   378  				switch state.kind {
   379  				case waitForDistinguished:
   380  					typeStr = "waitForDistinguished"
   381  				case waitFor:
   382  					typeStr = "waitFor"
   383  				case waitElsewhere:
   384  					typeStr = "waitElsewhere"
   385  				case waitSelf:
   386  					return str + "state=waitSelf"
   387  				case doneWaiting:
   388  					return str + "state=doneWaiting"
   389  				}
   390  				id := state.txn.ID
   391  				var txnS string
   392  				for k, v := range txnsByName {
   393  					if v.ID.Equal(id) {
   394  						txnS = k
   395  						break
   396  					}
   397  				}
   398  				if txnS == "" {
   399  					txnS = fmt.Sprintf("unknown txn with ID: %v", state.txn.ID)
   400  				}
   401  				return fmt.Sprintf("%sstate=%s txn=%s key=%s held=%t guard-access=%s",
   402  					str, typeStr, txnS, state.key, state.held, state.guardAccess)
   403  
   404  			case "enable":
   405  				lt.Enable()
   406  				return ""
   407  
   408  			case "clear":
   409  				lt.Clear(d.HasArg("disable"))
   410  				return lt.(*lockTableImpl).String()
   411  
   412  			case "print":
   413  				return lt.(*lockTableImpl).String()
   414  
   415  			default:
   416  				return fmt.Sprintf("unknown command: %s", d.Cmd)
   417  			}
   418  		})
   419  	})
   420  }
   421  
   422  func nextUUID(counter *uint128.Uint128) uuid.UUID {
   423  	*counter = counter.Add(1)
   424  	return uuid.FromUint128(*counter)
   425  }
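
        // Starting from the zero-initialized counter used by the tests in this
        // file, the first call returns 00000000-0000-0000-0000-000000000001,
        // the next 00000000-0000-0000-0000-000000000002, and so on. This is
        // what keeps the txn1, txn2, ... naming convention aligned with the
        // UUIDs that lockTableImpl.String() prints.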
   426  
   427  func scanTimestamp(t *testing.T, d *datadriven.TestData) hlc.Timestamp {
   428  	var ts hlc.Timestamp
   429  	var tsS string
   430  	d.ScanArgs(t, "ts", &tsS)
   431  	parts := strings.Split(tsS, ",")
   432  
   433  	// Find the wall time part.
   434  	tsW, err := strconv.ParseInt(parts[0], 10, 64)
   435  	if err != nil {
   436  		d.Fatalf(t, "%v", err)
   437  	}
   438  	ts.WallTime = tsW
   439  
   440  	// Find the logical part, if there is one.
   441  	var tsL int64
   442  	if len(parts) > 1 {
   443  		tsL, err = strconv.ParseInt(parts[1], 10, 32)
   444  		if err != nil {
   445  			d.Fatalf(t, "%v", err)
   446  		}
   447  	}
   448  	ts.Logical = int32(tsL)
   449  	return ts
   450  }
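
        // For example, ts=10,2 parses to hlc.Timestamp{WallTime: 10, Logical: 2},
        // while ts=10 leaves the logical component at zero.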
   451  
   452  func getSpan(t *testing.T, d *datadriven.TestData, str string) roachpb.Span {
   453  	parts := strings.Split(str, ",")
   454  	span := roachpb.Span{Key: roachpb.Key(parts[0])}
   455  	if len(parts) > 2 {
   456  		d.Fatalf(t, "incorrect span format: %s", str)
   457  	} else if len(parts) == 2 {
   458  		span.EndKey = roachpb.Key(parts[1])
   459  	}
   460  	return span
   461  }
   462  
   463  func scanSpans(t *testing.T, d *datadriven.TestData, ts hlc.Timestamp) *spanset.SpanSet {
   464  	spans := &spanset.SpanSet{}
   465  	var spansStr string
   466  	d.ScanArgs(t, "spans", &spansStr)
   467  	parts := strings.Split(spansStr, "+")
   468  	for _, p := range parts {
   469  		if len(p) < 2 || p[1] != '@' {
   470  			d.Fatalf(t, "incorrect span with access format: %s", p)
   471  		}
   472  		c := p[0]
   473  		p = p[2:]
   474  		var sa spanset.SpanAccess
   475  		switch c {
   476  		case 'r':
   477  			sa = spanset.SpanReadOnly
   478  		case 'w':
   479  			sa = spanset.SpanReadWrite
   480  		default:
   481  			d.Fatalf(t, "incorrect span access: %c", c)
   482  		}
   483  		spans.AddMVCC(sa, getSpan(t, d, p), ts)
   484  	}
   485  	return spans
   486  }
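
        // For example, spans=r@a,c+w@d parses to a SpanSet holding a read-only
        // span over the range [a,c) and a read-write span at the single key d,
        // both declared at the request's timestamp.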
   487  
   488  type workItem struct {
   489  	// Contains exactly one of a request or intents.
   490  
   491  	// Request.
   492  	request        *Request
   493  	locksToAcquire []roachpb.Key
   494  
   495  	// Update locks.
   496  	intents []roachpb.LockUpdate
   497  }
   498  
   499  func (w *workItem) getRequestTxnID() uuid.UUID {
   500  	if w.request != nil && w.request.Txn != nil {
   501  		return w.request.Txn.ID
   502  	}
   503  	return uuid.UUID{}
   504  }
   505  
   506  func doWork(ctx context.Context, item *workItem, e *workloadExecutor) error {
   507  	defer func() {
   508  		e.doneWork <- item
   509  	}()
   510  	if item.request != nil {
   511  		var lg *spanlatch.Guard
   512  		var g lockTableGuard
   513  		var err error
   514  		for {
   515  			// Since we can't do a select involving latch acquisition and context
   516  			// cancellation, the code makes sure to release latches when returning
   517  			// early due to error. Otherwise other requests will get stuck and
   518  			// group.Wait() will not return until the test times out.
   519  			lg, err = e.lm.Acquire(context.Background(), item.request.LatchSpans)
   520  			if err != nil {
   521  				return err
   522  			}
   523  			g = e.lt.ScanAndEnqueue(*item.request, g)
   524  			if !g.ShouldWait() {
   525  				break
   526  			}
   527  			e.lm.Release(lg)
   528  			var lastID uuid.UUID
   529  		L:
   530  			for {
   531  				select {
   532  				case <-g.NewStateChan():
   533  				case <-ctx.Done():
   534  					return ctx.Err()
   535  				}
   536  				state := g.CurState()
   537  				switch state.kind {
   538  				case doneWaiting:
   539  					if !lastID.Equal(uuid.UUID{}) && item.request.Txn != nil {
   540  						_, err = e.waitingFor(item.request.Txn.ID, lastID, uuid.UUID{})
   541  						if err != nil {
   542  							e.lt.Dequeue(g)
   543  							return err
   544  						}
   545  					}
   546  					break L
   547  				case waitSelf:
   548  					if item.request.Txn == nil {
   549  						e.lt.Dequeue(g)
   550  						return errors.Errorf("non-transactional request cannot waitSelf")
   551  					}
   552  				case waitForDistinguished, waitFor, waitElsewhere:
   553  					if item.request.Txn != nil {
   554  						var aborted bool
   555  						aborted, err = e.waitingFor(item.request.Txn.ID, lastID, state.txn.ID)
   556  						if !aborted {
   557  							lastID = state.txn.ID
   558  						}
   559  						if aborted {
   560  							e.lt.Dequeue(g)
   561  							return err
   562  						}
   563  					}
   564  				default:
   565  					return errors.Errorf("unexpected state: %v", state.kind)
   566  				}
   567  			}
   568  		}
   569  
   570  		// Acquire locks.
   571  		for _, k := range item.locksToAcquire {
   572  			err = e.acquireLock(&item.request.Txn.TxnMeta, k)
   573  			if err != nil {
   574  				break
   575  			}
   576  		}
   577  		e.lt.Dequeue(g)
   578  		e.lm.Release(lg)
   579  		return err
   580  	}
   581  	for i := range item.intents {
   582  		if err := e.lt.UpdateLocks(&item.intents[i]); err != nil {
   583  			return err
   584  		}
   585  	}
   586  	return nil
   587  }
   588  
   589  // Contains either a request or the ID of the transaction whose locks should
   590  // be released.
   591  type workloadItem struct {
   592  	// Request to be executed; non-nil iff this item represents a request.
   593  	request *Request
   594  	// locks to be acquired by the request.
   595  	locksToAcquire []roachpb.Key
   596  
   597  	// Non-zero when the transaction should release its locks.
   598  	finish uuid.UUID
   599  }
   600  
   601  // State of a transaction maintained by workloadExecutor, used for deadlock
   602  // detection and for deciding when a transaction can be finished (that is,
   603  // when a workloadItem has instructed that it be finished and all of its
   604  // ongoing requests have finished). Requests can be aborted due to deadlock.
   605  // For workload-execution convenience this does not abort the whole
   606  // transaction, but it does mean that the locks acquired by the transaction
   607  // can be a subset of what it was instructed to acquire. The acquired locks
   608  // are tracked in acquiredLocks.
   609  type transactionState struct {
   610  	txn *enginepb.TxnMeta
   611  
   612  	// A map from each transaction this one depends on to a refcount (the
   613  	// refcount is always > 0).
   614  	dependsOn       map[uuid.UUID]int
   615  	ongoingRequests map[*workItem]struct{}
   616  	acquiredLocks   []roachpb.Key
   617  	finish          bool
   618  }
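
        // A worked example of the dependsOn refcounting: if two ongoing requests
        // of transaction A are both waiting on transaction B, then A's
        // dependsOn[B] == 2. Each request decrements the refcount (in
        // waitingFor) when it stops waiting on B, and the A->B edge is removed
        // only once the refcount reaches zero.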
   619  
   620  func makeWorkItemFinishTxn(tstate *transactionState) workItem {
   621  	wItem := workItem{}
   622  	for i := range tstate.acquiredLocks {
   623  		wItem.intents = append(wItem.intents, roachpb.LockUpdate{
   624  			Span:   roachpb.Span{Key: tstate.acquiredLocks[i]},
   625  			Txn:    *tstate.txn,
   626  			Status: roachpb.COMMITTED,
   627  		})
   628  	}
   629  	return wItem
   630  }
   631  
   632  func makeWorkItemForRequest(wi workloadItem) workItem {
   633  	wItem := workItem{
   634  		request:        wi.request,
   635  		locksToAcquire: wi.locksToAcquire,
   636  	}
   637  	return wItem
   638  }
   639  
   640  type workloadExecutor struct {
   641  	lm spanlatch.Manager
   642  	lt lockTable
   643  
   644  	// Protects the following fields in transactionState: acquiredLocks and
   645  	// dependsOn, and the transactions map.
   646  	mu                syncutil.Mutex
   647  	items             []workloadItem
   648  	transactions      map[uuid.UUID]*transactionState
   649  	doneWork          chan *workItem
   650  	concurrency       int
   651  	numAborted        int
   652  	numConcViolations int
   653  }
   654  
   655  func newWorkLoadExecutor(items []workloadItem, concurrency int) *workloadExecutor {
   656  	return &workloadExecutor{
   657  		lm: spanlatch.Manager{},
   658  		lt: &lockTableImpl{
   659  			enabled:  true,
   660  			maxLocks: 100000,
   661  		},
   662  		items:        items,
   663  		transactions: make(map[uuid.UUID]*transactionState),
   664  		doneWork:     make(chan *workItem),
   665  		concurrency:  concurrency,
   666  	}
   667  }
   668  
   669  func (e *workloadExecutor) acquireLock(txn *enginepb.TxnMeta, k roachpb.Key) error {
   670  	err := e.lt.AcquireLock(txn, k, lock.Exclusive, lock.Unreplicated)
   671  	if err != nil {
   672  		return err
   673  	}
   674  	e.mu.Lock()
   675  	defer e.mu.Unlock()
   676  	tstate, ok := e.transactions[txn.ID]
   677  	if !ok {
   678  		return errors.Errorf("testbug: lock acquiring request with txnID %v has no transaction", txn.ID)
   679  	}
   680  	tstate.acquiredLocks = append(tstate.acquiredLocks, k)
   681  	return nil
   682  }
   683  
   684  // Returns true if a cycle was found. A non-nil error implies true.
   685  func (e *workloadExecutor) findCycle(node uuid.UUID, cycleNode uuid.UUID) (bool, error) {
   686  	if node == cycleNode {
   687  		return true, nil
   688  	}
   689  	tstate, ok := e.transactions[node]
   690  	if !ok {
   691  		return true, errors.Errorf("edge to txn that is not in map")
   692  	}
   693  	for k := range tstate.dependsOn {
   694  		found, err := e.findCycle(k, cycleNode)
   695  		if err != nil || found {
   696  			return true, err
   697  		}
   698  	}
   699  	return false, nil
   700  }
   701  
   702  // Returns true if the request should abort. A non-nil error implies true.
   703  func (e *workloadExecutor) waitingFor(
   704  	waiter uuid.UUID, lastID uuid.UUID, currID uuid.UUID,
   705  ) (bool, error) {
   706  	e.mu.Lock()
   707  	defer e.mu.Unlock()
   708  	tstate, ok := e.transactions[waiter]
   709  	if !ok {
   710  		return true, errors.Errorf("testbug: request calling waitingFor with txnID %v has no transaction", waiter)
   711  	}
   712  	if !lastID.Equal(uuid.UUID{}) {
   713  		refcount := tstate.dependsOn[lastID]
   714  		refcount--
   715  		if refcount > 0 {
   716  			tstate.dependsOn[lastID] = refcount
   717  		} else if refcount == 0 {
   718  			delete(tstate.dependsOn, lastID)
   719  		} else {
   720  			return true, errors.Errorf("testbug: txn %v has a negative refcount %d for dependency on %v", waiter, refcount, lastID)
   721  		}
   722  	}
   723  	if !currID.Equal(uuid.UUID{}) {
   724  		refcount := tstate.dependsOn[currID]
   725  		// Check whether this new edge introduces a cycle. We know there
   726  		// cannot be a cycle preceding this edge, so any cycle must involve
   727  		// waiter. We do a trivial recursive DFS (which does not avoid
   728  		// exploring the same node multiple times).
   729  		if refcount == 0 {
   730  			found, err := e.findCycle(currID, waiter)
   731  			if found {
   732  				return found, err
   733  			}
   734  		}
   735  		refcount++
   736  		tstate.dependsOn[currID] = refcount
   737  	}
   738  	return false, nil
   739  }
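
        // Hypothetical illustration: if transaction A's request was waiting on
        // txn B (lastID == B) and is now waiting on txn C (currID == C), the
        // call decrements the refcount on the A->B edge and, before adding an
        // A->C edge, uses findCycle to check whether C can already reach A.
        // If it can, the new edge would close a cycle, so the request is told
        // to abort.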
   740  
   741  // Returns true if it started a goroutine.
   742  func (e *workloadExecutor) tryFinishTxn(
   743  	ctx context.Context, group *errgroup.Group, txnID uuid.UUID, tstate *transactionState,
   744  ) bool {
   745  	if !tstate.finish || len(tstate.ongoingRequests) > 0 {
   746  		return false
   747  	}
   748  	if len(tstate.acquiredLocks) > 0 {
   749  		work := makeWorkItemFinishTxn(tstate)
   750  		group.Go(func() error { return doWork(ctx, &work, e) })
   751  		return true
   752  	}
   753  	return false
   754  }
   755  
   756  // Execution can be in either strict or non-strict mode. In strict mode
   757  // the executor expects to be able to limit concurrency to the configured
   758  // value L by waiting for the L ongoing requests to finish. It sets a
   759  // deadline for this, and returns with an error if the deadline expires
   760  // (this is just a slightly cleaner error than the test exceeding its
   761  // deadline, since it prints out the lock table contents).
   762  //
   763  // When executing in non-strict mode, the concurrency bound is not necessarily
   764  // respected, since requests may be waiting for locks to be released. The
   765  // executor waits for a tiny interval when concurrency is >= L, and if
   766  // no request completes it starts another request. Purely out of curiosity,
   767  // these "concurrency violations" are tracked in a counter. The amount of
   768  // concurrency in this non-strict mode is bounded by maxNonStrictConcurrency.
   769  func (e *workloadExecutor) execute(strict bool, maxNonStrictConcurrency int) error {
   770  	numOutstanding := 0
   771  	i := 0
   772  	group, ctx := errgroup.WithContext(context.Background())
   773  	timer := time.NewTimer(time.Second)
   774  	timer.Stop()
   775  	var err error
   776  L:
   777  	for i < len(e.items) || numOutstanding > 0 {
   778  		if numOutstanding >= e.concurrency || (i == len(e.items) && numOutstanding > 0) {
   779  			strictIter := strict || i == len(e.items)
   780  			if strictIter {
   781  				timer.Reset(30 * time.Second)
   782  			} else {
   783  				timer.Reset(time.Millisecond)
   784  			}
   785  			var w *workItem
   786  			select {
   787  			case w = <-e.doneWork:
   788  				if !timer.Stop() {
   789  					<-timer.C
   790  				}
   791  			case <-timer.C:
   792  				if strictIter {
   793  					err = errors.Errorf("timer expired with lock table: %v", e.lt)
   794  					break L
   795  				} else if numOutstanding > maxNonStrictConcurrency {
   796  					continue
   797  				} else {
   798  					e.numConcViolations++
   799  				}
   800  			}
   801  			if w != nil {
   802  				numOutstanding--
   803  				txnID := w.getRequestTxnID()
   804  				if !txnID.Equal(uuid.UUID{}) {
   805  					tstate, ok := e.transactions[txnID]
   806  					if !ok {
   807  						err = errors.Errorf("testbug: finished request with txnID %v has no transaction", txnID)
   808  						break
   809  					}
   810  					delete(tstate.ongoingRequests, w)
   811  					if e.tryFinishTxn(ctx, group, txnID, tstate) {
   812  						numOutstanding++
   813  						continue
   814  					}
   815  				}
   816  			}
   817  		}
   818  		if i == len(e.items) {
   819  			continue
   820  		}
   821  
   822  		wi := e.items[i]
   823  		i++
   824  		if wi.request != nil {
   825  			work := makeWorkItemForRequest(wi)
   826  			if wi.request.Txn != nil {
   827  				txnID := wi.request.Txn.ID
   828  				_, ok := e.transactions[txnID]
   829  				if !ok {
   830  					// New transaction
   831  					tstate := &transactionState{
   832  						txn:             &wi.request.Txn.TxnMeta,
   833  						dependsOn:       make(map[uuid.UUID]int),
   834  						ongoingRequests: make(map[*workItem]struct{}),
   835  					}
   836  					e.mu.Lock()
   837  					e.transactions[txnID] = tstate
   838  					e.mu.Unlock()
   839  				}
   840  				e.transactions[txnID].ongoingRequests[&work] = struct{}{}
   841  			}
   842  			group.Go(func() error { return doWork(ctx, &work, e) })
   843  			numOutstanding++
   844  			continue
   845  		}
   846  		tstate, ok := e.transactions[wi.finish]
   847  		if !ok {
   848  			err = errors.Errorf("testbug: txn to finish not found: %v", wi.finish)
   849  			break
   850  		}
   851  		tstate.finish = true
   852  		if e.tryFinishTxn(ctx, group, wi.finish, tstate) {
   853  			numOutstanding++
   854  		}
   855  	}
   856  	err2 := group.Wait()
   857  	if err2 != nil {
   858  		err = err2
   859  	}
   860  	fmt.Printf("items: %d, aborted: %d, concurrency violations: %d, lock table: %v\n",
   861  		len(e.items), e.numAborted, e.numConcViolations, e.lt)
   862  	return err
   863  }
   864  
   865  // Randomized test in which each transaction has a single request that does
   866  // not acquire locks. This ensures there will be no deadlocks, so the test
   867  // executor can run in strict concurrency mode (see the comment in execute()).
   868  func TestLockTableConcurrentSingleRequests(t *testing.T) {
   869  	defer leaktest.AfterTest(t)()
   870  
   871  	txnCounter := uint128.FromInts(0, 0)
   872  	var timestamps []hlc.Timestamp
   873  	for i := 0; i < 10; i++ {
   874  		timestamps = append(timestamps, hlc.Timestamp{WallTime: int64(i + 1)})
   875  	}
   876  	var keys []roachpb.Key
   877  	for i := 0; i < 10; i++ {
   878  		keys = append(keys, roachpb.Key(string(rune('a'+i))))
   879  	}
   880  	rng := rand.New(rand.NewSource(uint64(timeutil.Now().UnixNano())))
   881  
   882  	const numKeys = 2
   883  	var items []workloadItem
   884  	var startedTxnIDs []uuid.UUID // inefficient queue, but ok for a test.
   885  	const maxStartedTxns = 10
   886  	const numRequests = 10000
   887  	for i := 0; i < numRequests; i++ {
   888  		ts := timestamps[rng.Intn(len(timestamps))]
   889  		keysPerm := rng.Perm(len(keys))
   890  		spans := &spanset.SpanSet{}
   891  		for i := 0; i < numKeys; i++ {
   892  			span := roachpb.Span{Key: keys[keysPerm[i]]}
   893  			acc := spanset.SpanAccess(rng.Intn(int(spanset.NumSpanAccess)))
   894  			spans.AddMVCC(acc, span, ts)
   895  		}
   896  		var txn *roachpb.Transaction
   897  		if rng.Intn(2) == 0 {
   898  			txn = &roachpb.Transaction{
   899  				TxnMeta: enginepb.TxnMeta{
   900  					ID:             nextUUID(&txnCounter),
   901  					WriteTimestamp: ts,
   902  				},
   903  				ReadTimestamp: ts,
   904  			}
   905  		}
   906  		request := &Request{
   907  			Txn:        txn,
   908  			Timestamp:  ts,
   909  			LatchSpans: spans,
   910  			LockSpans:  spans,
   911  		}
   912  		items = append(items, workloadItem{request: request})
   913  		if txn != nil {
   914  			startedTxnIDs = append(startedTxnIDs, txn.ID)
   915  		}
   916  		randomMaxStartedTxns := rng.Intn(maxStartedTxns)
   917  		for len(startedTxnIDs) > randomMaxStartedTxns {
   918  			items = append(items, workloadItem{finish: startedTxnIDs[0]})
   919  			startedTxnIDs = startedTxnIDs[1:]
   920  		}
   921  	}
   922  	for len(startedTxnIDs) > 0 {
   923  		items = append(items, workloadItem{finish: startedTxnIDs[0]})
   924  		startedTxnIDs = startedTxnIDs[1:]
   925  	}
   926  	concurrency := []int{2, 8, 16, 32}
   927  	for _, c := range concurrency {
   928  		t.Run(fmt.Sprintf("concurrency %d", c), func(t *testing.T) {
   929  			exec := newWorkLoadExecutor(items, c)
   930  			if err := exec.execute(true, 0); err != nil {
   931  				t.Fatal(err)
   932  			}
   933  		})
   934  	}
   935  }
   936  
   937  // General randomized test.
   938  func TestLockTableConcurrentRequests(t *testing.T) {
   939  	defer leaktest.AfterTest(t)()
   940  
   941  	// TODO(sbhola): different test cases with different settings of the
   942  	// randomization parameters.
   943  	txnCounter := uint128.FromInts(0, 0)
   944  	var timestamps []hlc.Timestamp
   945  	for i := 0; i < 2; i++ {
   946  		timestamps = append(timestamps, hlc.Timestamp{WallTime: int64(i + 1)})
   947  	}
   948  	var keys []roachpb.Key
   949  	for i := 0; i < 10; i++ {
   950  		keys = append(keys, roachpb.Key(string(rune('a'+i))))
   951  	}
   952  	rng := rand.New(rand.NewSource(uint64(timeutil.Now().UnixNano())))
   953  	const numActiveTxns = 8
   954  	var activeTxns [numActiveTxns]*enginepb.TxnMeta
   955  	var items []workloadItem
   956  	const numRequests = 1000
   957  	for i := 0; i < numRequests; i++ {
   958  		var txnMeta *enginepb.TxnMeta
   959  		var ts hlc.Timestamp
   960  		if rng.Intn(10) != 0 {
   961  			txnIndex := rng.Intn(len(activeTxns))
   962  			newTxn := rng.Intn(4) != 0 || activeTxns[txnIndex] == nil
   963  			if newTxn {
   964  				ts = timestamps[rng.Intn(len(timestamps))]
   965  				txnMeta = &enginepb.TxnMeta{
   966  					ID:             nextUUID(&txnCounter),
   967  					WriteTimestamp: ts,
   968  				}
   969  				oldTxn := activeTxns[txnIndex]
   970  				if oldTxn != nil {
   971  					items = append(items, workloadItem{finish: oldTxn.ID})
   972  				}
   973  				activeTxns[txnIndex] = txnMeta
   974  			} else {
   975  				txnMeta = activeTxns[txnIndex]
   976  				ts = txnMeta.WriteTimestamp
   977  			}
   978  		} else {
   979  			ts = timestamps[rng.Intn(len(timestamps))]
   980  		}
   981  		keysPerm := rng.Perm(len(keys))
   982  		spans := &spanset.SpanSet{}
   983  		onlyReads := txnMeta == nil && rng.Intn(2) != 0
   984  		numKeys := rng.Intn(len(keys)-1) + 1
   985  		request := &Request{
   986  			Timestamp:  ts,
   987  			LatchSpans: spans,
   988  			LockSpans:  spans,
   989  		}
   990  		if txnMeta != nil {
   991  			request.Txn = &roachpb.Transaction{
   992  				TxnMeta:       *txnMeta,
   993  				ReadTimestamp: ts,
   994  			}
   995  		}
   996  		wi := workloadItem{request: request}
   997  		for i := 0; i < numKeys; i++ {
   998  			span := roachpb.Span{Key: keys[keysPerm[i]]}
   999  			acc := spanset.SpanReadOnly
  1000  			dupRead := false
  1001  			if !onlyReads {
  1002  				acc = spanset.SpanAccess(rng.Intn(int(spanset.NumSpanAccess)))
  1003  				if acc == spanset.SpanReadWrite && txnMeta != nil && rng.Intn(2) == 0 {
  1004  					// Acquire lock.
  1005  					wi.locksToAcquire = append(wi.locksToAcquire, span.Key)
  1006  				}
  1007  				if acc == spanset.SpanReadWrite && rng.Intn(2) == 0 {
  1008  					// Also include the key as read.
  1009  					dupRead = true
  1010  				}
  1011  			}
  1012  			spans.AddMVCC(acc, span, ts)
  1013  			if dupRead {
  1014  				spans.AddMVCC(spanset.SpanReadOnly, span, ts)
  1015  			}
  1016  		}
  1017  		items = append(items, wi)
  1018  	}
  1019  	for i := range activeTxns {
  1020  		if txnMeta := activeTxns[i]; txnMeta != nil {
  1021  			items = append(items, workloadItem{finish: txnMeta.ID})
  1022  		}
  1023  	}
  1024  	concurrency := []int{2, 8, 16, 32}
  1025  	for _, c := range concurrency {
  1026  		t.Run(fmt.Sprintf("concurrency %d", c), func(t *testing.T) {
  1027  			exec := newWorkLoadExecutor(items, c)
  1028  			if err := exec.execute(false, 200); err != nil {
  1029  				t.Fatal(err)
  1030  			}
  1031  		})
  1032  	}
  1033  }
  1034  
  1035  type benchWorkItem struct {
  1036  	Request
  1037  	locksToAcquire []roachpb.Key
  1038  }
  1039  
  1040  type benchEnv struct {
  1041  	lm *spanlatch.Manager
  1042  	lt lockTable
  1043  	// Stats. As expected, the contended benchmarks have most requests
  1044  	// encountering a wait, and the number of scan calls is twice the
  1045  	// number of requests.
  1046  	numRequestsWaited *uint64
  1047  	numScanCalls      *uint64
  1048  }
  1049  
  1050  // Does the work for one request: it both acquires and releases the locks.
  1051  // It drops the latches after acquiring locks and reacquires latches
  1052  // before releasing the locks, which will induce contention in the
  1053  // lockTable since the requests that were waiting for latches will
  1054  // call ScanAndEnqueue before the locks are released.
  1055  func doBenchWork(item *benchWorkItem, env benchEnv, doneCh chan<- error) {
  1056  	var lg *spanlatch.Guard
  1057  	var g lockTableGuard
  1058  	var err error
  1059  	firstIter := true
  1060  	for {
  1061  		if lg, err = env.lm.Acquire(context.Background(), item.LatchSpans); err != nil {
  1062  			doneCh <- err
  1063  			return
  1064  		}
  1065  		g = env.lt.ScanAndEnqueue(item.Request, g)
  1066  		atomic.AddUint64(env.numScanCalls, 1)
  1067  		if !g.ShouldWait() {
  1068  			break
  1069  		}
  1070  		if firstIter {
  1071  			atomic.AddUint64(env.numRequestsWaited, 1)
  1072  			firstIter = false
  1073  		}
  1074  		env.lm.Release(lg)
  1075  		for {
  1076  			<-g.NewStateChan()
  1077  			state := g.CurState()
  1078  			if state.kind == doneWaiting {
  1079  				break
  1080  			}
  1081  		}
  1082  	}
  1083  	for _, k := range item.locksToAcquire {
  1084  		if err = env.lt.AcquireLock(&item.Txn.TxnMeta, k, lock.Exclusive, lock.Unreplicated); err != nil {
  1085  			doneCh <- err
  1086  			return
  1087  		}
  1088  	}
  1089  	env.lt.Dequeue(g)
  1090  	env.lm.Release(lg)
  1091  	if len(item.locksToAcquire) == 0 {
  1092  		doneCh <- nil
  1093  		return
  1094  	}
  1095  	// Release locks.
  1096  	if lg, err = env.lm.Acquire(context.Background(), item.LatchSpans); err != nil {
  1097  		doneCh <- err
  1098  		return
  1099  	}
  1100  	for _, k := range item.locksToAcquire {
  1101  		intent := roachpb.LockUpdate{
  1102  			Span:   roachpb.Span{Key: k},
  1103  			Txn:    item.Request.Txn.TxnMeta,
  1104  			Status: roachpb.COMMITTED,
  1105  		}
  1106  		if err = env.lt.UpdateLocks(&intent); err != nil {
  1107  			doneCh <- err
  1108  			return
  1109  		}
  1110  	}
  1111  	env.lm.Release(lg)
  1112  	doneCh <- nil
  1113  }
  1114  
  1115  // Creates requests for a group of contending requests. The group is
  1116  // identified by index. The group has numOutstanding requests at a time, so
  1117  // that is the number of requests created. numKeys is the number of keys
  1118  // accessed by this group, of which numReadKeys are only read -- the remaining
  1119  // keys will be locked.
  1120  func createRequests(index int, numOutstanding int, numKeys int, numReadKeys int) []benchWorkItem {
  1121  	ts := hlc.Timestamp{WallTime: 10}
  1122  	spans := &spanset.SpanSet{}
  1123  	wi := benchWorkItem{
  1124  		Request: Request{
  1125  			Timestamp:  ts,
  1126  			LatchSpans: spans,
  1127  			LockSpans:  spans,
  1128  		},
  1129  	}
  1130  	for i := 0; i < numKeys; i++ {
  1131  		key := roachpb.Key(fmt.Sprintf("k%d.%d", index, i))
  1132  		if i < numReadKeys {
  1133  			spans.AddMVCC(spanset.SpanReadOnly, roachpb.Span{Key: key}, ts)
  1134  		} else {
  1135  			spans.AddMVCC(spanset.SpanReadWrite, roachpb.Span{Key: key}, ts)
  1136  			wi.locksToAcquire = append(wi.locksToAcquire, key)
  1137  		}
  1138  	}
  1139  	var result []benchWorkItem
  1140  	txnCounter := uint128.FromInts(0, 0)
  1141  	for i := 0; i < numOutstanding; i++ {
  1142  		wiCopy := wi
  1143  		wiCopy.Request.Txn = &roachpb.Transaction{
  1144  			TxnMeta: enginepb.TxnMeta{
  1145  				ID:             nextUUID(&txnCounter),
  1146  				WriteTimestamp: ts,
  1147  			},
  1148  			ReadTimestamp: ts,
  1149  		}
  1150  		result = append(result, wiCopy)
  1151  	}
  1152  	return result
  1153  }
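
        // For example, index=2 with numKeys=3 yields the keys k2.0, k2.1, and
        // k2.2; the numOutstanding copies share the same spans but each gets
        // its own transaction.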
  1154  
  1155  // Interface that is also implemented by *testing.PB.
  1156  type iterations interface {
  1157  	Next() bool
  1158  }
  1159  
  1160  type simpleIters struct {
  1161  	n int
  1162  }
  1163  
  1164  var _ iterations = (*simpleIters)(nil)
  1165  
  1166  func (i *simpleIters) Next() bool {
  1167  	i.n--
  1168  	return i.n >= 0
  1169  }
  1170  
  1171  // Runs the requests for a group of contending requests. The number of
  1172  // concurrent requests is equal to the length of items.
  1173  func runRequests(b *testing.B, pb iterations, items []benchWorkItem, env benchEnv) {
  1174  	requestDoneCh := make(chan error, len(items))
  1175  	i := 0
  1176  	outstanding := 0
  1177  	for pb.Next() {
  1178  		if outstanding == len(items) {
  1179  			if err := <-requestDoneCh; err != nil {
  1180  				b.Fatal(err)
  1181  			}
  1182  			outstanding--
  1183  		}
  1184  		go doBenchWork(&items[i], env, requestDoneCh)
  1185  		outstanding++
  1186  		i = (i + 1) % len(items)
  1187  	}
  1188  	// Drain
  1189  	for outstanding > 0 {
  1190  		if err := <-requestDoneCh; err != nil {
  1191  			b.Fatal(err)
  1192  		}
  1193  		outstanding--
  1194  	}
  1195  }
  1196  
  1197  // Benchmark that varies the number of request groups (requests within a
  1198  // group are contending), the number of outstanding requests per group, and
  1199  // the number of read keys for the requests in each group (when the number of
  1200  // read keys is equal to the total keys, there is no contention within the
  1201  // group). The number of groups is either 1 or GOMAXPROCS, in order to use
  1202  // RunParallel() -- it doesn't seem possible to get parallelism between these
  1203  // two values when using B.RunParallel() since B.SetParallelism() accepts an
  1204  // integer multiplier to GOMAXPROCS.
  1205  func BenchmarkLockTable(b *testing.B) {
  1206  	maxGroups := runtime.GOMAXPROCS(0)
  1207  	const numKeys = 5
  1208  	for _, numGroups := range []int{1, maxGroups} {
  1209  		for _, outstandingPerGroup := range []int{1, 2, 4, 8, 16} {
  1210  			for numReadKeys := 0; numReadKeys <= numKeys; numReadKeys++ {
  1211  				b.Run(
  1212  					fmt.Sprintf("groups=%d,outstanding=%d,read=%d/", numGroups, outstandingPerGroup,
  1213  						numReadKeys),
  1214  					func(b *testing.B) {
  1215  						var numRequestsWaited uint64
  1216  						var numScanCalls uint64
  1217  						env := benchEnv{
  1218  							lm: &spanlatch.Manager{},
  1219  							lt: &lockTableImpl{
  1220  								enabled:  true,
  1221  								maxLocks: 100000,
  1222  							},
  1223  							numRequestsWaited: &numRequestsWaited,
  1224  							numScanCalls:      &numScanCalls,
  1225  						}
  1226  						var requestsPerGroup [][]benchWorkItem
  1227  						for i := 0; i < numGroups; i++ {
  1228  							requestsPerGroup = append(requestsPerGroup,
  1229  								createRequests(i, outstandingPerGroup, numKeys, numReadKeys))
  1230  						}
  1231  						b.ResetTimer()
  1232  						if numGroups > 1 {
  1233  							var groupNum int32 = -1
  1234  							b.RunParallel(func(pb *testing.PB) {
  1235  								index := atomic.AddInt32(&groupNum, 1)
  1236  								runRequests(b, pb, requestsPerGroup[index], env)
  1237  							})
  1238  						} else {
  1239  							iters := &simpleIters{b.N}
  1240  							runRequests(b, iters, requestsPerGroup[0], env)
  1241  						}
  1242  						if log.V(1) {
  1243  							log.Infof(context.Background(), "num requests that waited: %d, num scan calls: %d\n",
  1244  								atomic.LoadUint64(&numRequestsWaited), atomic.LoadUint64(&numScanCalls))
  1245  						}
  1246  					})
  1247  			}
  1248  		}
  1249  	}
  1250  }
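
        // To run only this benchmark with the standard Go tooling, an
        // invocation along these lines should work (ordinary go test flags;
        // nothing repo-specific is assumed):
        //
        //   go test -run '^$' -bench BenchmarkLockTable \
        //     github.com/cockroachdb/cockroach/pkg/kv/kvserver/concurrency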
  1251  
  1252  // TODO(sbhola):
  1253  // - More datadriven and randomized test cases:
  1254  //   - both local and global keys
  1255  //   - repeated lock acquisition at same seqnums, different seqnums, epoch changes
  1256  //   - updates with ignored seqs
  1257  //   - error paths
  1258  // - Test with concurrency in lockTable calls.
  1259  //   - test for race in gc'ing lock that has since become non-empty or new
  1260  //     non-empty one has been inserted.