github.com/cockroachdb/cockroach@v20.2.0-alpha.1+incompatible/pkg/kv/kvserver/store_test.go

     1  // Copyright 2014 The Cockroach Authors.
     2  //
     3  // Use of this software is governed by the Business Source License
     4  // included in the file licenses/BSL.txt.
     5  //
     6  // As of the Change Date specified in that file, in accordance with
     7  // the Business Source License, use of this software will be governed
     8  // by the Apache License, Version 2.0, included in the file
     9  // licenses/APL.txt.
    10  
    11  package kvserver
    12  
    13  import (
    14  	"bytes"
    15  	"context"
    16  	"fmt"
    17  	"math"
    18  	"math/rand"
    19  	"reflect"
    20  	"sort"
    21  	"sync"
    22  	"sync/atomic"
    23  	"testing"
    24  	"time"
    25  
    26  	"github.com/cockroachdb/cockroach/pkg/base"
    27  	"github.com/cockroachdb/cockroach/pkg/clusterversion"
    28  	"github.com/cockroachdb/cockroach/pkg/config"
    29  	"github.com/cockroachdb/cockroach/pkg/config/zonepb"
    30  	"github.com/cockroachdb/cockroach/pkg/gossip"
    31  	"github.com/cockroachdb/cockroach/pkg/keys"
    32  	"github.com/cockroachdb/cockroach/pkg/kv"
    33  	"github.com/cockroachdb/cockroach/pkg/kv/kvserver/batcheval"
    34  	"github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverbase"
    35  	"github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverpb"
    36  	"github.com/cockroachdb/cockroach/pkg/kv/kvserver/rditer"
    37  	"github.com/cockroachdb/cockroach/pkg/kv/kvserver/stateloader"
    38  	"github.com/cockroachdb/cockroach/pkg/kv/kvserver/txnwait"
    39  	"github.com/cockroachdb/cockroach/pkg/roachpb"
    40  	"github.com/cockroachdb/cockroach/pkg/rpc"
    41  	"github.com/cockroachdb/cockroach/pkg/settings/cluster"
    42  	"github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
    43  	"github.com/cockroachdb/cockroach/pkg/storage"
    44  	"github.com/cockroachdb/cockroach/pkg/storage/enginepb"
    45  	"github.com/cockroachdb/cockroach/pkg/testutils"
    46  	"github.com/cockroachdb/cockroach/pkg/util/hlc"
    47  	"github.com/cockroachdb/cockroach/pkg/util/leaktest"
    48  	"github.com/cockroachdb/cockroach/pkg/util/log"
    49  	"github.com/cockroachdb/cockroach/pkg/util/metric"
    50  	"github.com/cockroachdb/cockroach/pkg/util/protoutil"
    51  	"github.com/cockroachdb/cockroach/pkg/util/randutil"
    52  	"github.com/cockroachdb/cockroach/pkg/util/stop"
    53  	"github.com/cockroachdb/cockroach/pkg/util/uuid"
    54  	"github.com/cockroachdb/errors"
    55  	"github.com/gogo/protobuf/proto"
    56  	"github.com/kr/pretty"
    57  	"github.com/stretchr/testify/require"
    58  	"go.etcd.io/etcd/raft"
    59  	"go.etcd.io/etcd/raft/raftpb"
    60  	"golang.org/x/time/rate"
    61  )
    62  
    63  var testIdent = roachpb.StoreIdent{
    64  	ClusterID: uuid.MakeV4(),
    65  	NodeID:    1,
    66  	StoreID:   1,
    67  }
    68  
    69  func (s *Store) TestSender() kv.Sender {
    70  	return kv.Wrap(s, func(ba roachpb.BatchRequest) roachpb.BatchRequest {
    71  		if ba.RangeID != 0 {
    72  			return ba
    73  		}
    74  
    75  		// If the client hasn't set ba.RangeID, we do it a favor and figure out the
    76  		// range to which the request needs to go.
    77  		//
    78  		// NOTE: We don't use keys.Range(ba.Requests) here because that does some
    79  		// validation on the batch, and some tests using this sender don't like
    80  		// that.
    81  		key, err := keys.Addr(ba.Requests[0].GetInner().Header().Key)
    82  		if err != nil {
    83  			log.Fatalf(context.Background(), "%v", err)
    84  		}
    85  
    86  		ba.RangeID = roachpb.RangeID(1)
    87  		if repl := s.LookupReplica(key); repl != nil {
    88  			ba.RangeID = repl.RangeID
    89  
    90  			// Attempt to assign a Replica descriptor to the batch if
    91  			// necessary, but don't throw an error if this fails.
    92  			if ba.Replica == (roachpb.ReplicaDescriptor{}) {
    93  				if desc, err := repl.GetReplicaDescriptor(); err == nil {
    94  					ba.Replica = desc
    95  				}
    96  			}
    97  		}
    98  		return ba
    99  	})
   100  }
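
        // As a usage sketch: a test can send a request through TestSender
        // without filling in any routing fields and rely on the wrapper above
        // to resolve them (TestStoreSend below does exactly this):
        //
        //	gArgs := getArgs([]byte("a")) // RangeID and Replica left unset
        //	if _, pErr := kv.SendWrapped(ctx, store.TestSender(), &gArgs); pErr != nil {
        //		t.Fatal(pErr)
        //	}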
   101  
   102  // testSenderFactory is an implementation of the
   103  // client.TxnSenderFactory interface.
   104  type testSenderFactory struct {
   105  	store        *Store
   106  	nonTxnSender *testSender
   107  }
   108  
   109  func (f *testSenderFactory) RootTransactionalSender(
   110  	txn *roachpb.Transaction, _ roachpb.UserPriority,
   111  ) kv.TxnSender {
   112  	return kv.NewMockTransactionalSender(
   113  		func(
   114  			ctx context.Context, _ *roachpb.Transaction, ba roachpb.BatchRequest,
   115  		) (*roachpb.BatchResponse, *roachpb.Error) {
   116  			return f.store.Send(ctx, ba)
   117  		},
   118  		txn)
   119  }
   120  
   121  func (f *testSenderFactory) LeafTransactionalSender(tis *roachpb.LeafTxnInputState) kv.TxnSender {
   122  	return kv.NewMockTransactionalSender(
   123  		func(
   124  			ctx context.Context, _ *roachpb.Transaction, ba roachpb.BatchRequest,
   125  		) (*roachpb.BatchResponse, *roachpb.Error) {
   126  			return f.store.Send(ctx, ba)
   127  		},
   128  		&tis.Txn)
   129  }
   130  
   131  func (f *testSenderFactory) NonTransactionalSender() kv.Sender {
   132  	if f.nonTxnSender != nil {
   133  		return f.nonTxnSender
   134  	}
   135  	f.nonTxnSender = &testSender{store: f.store}
   136  	return f.nonTxnSender
   137  }
   138  
   139  func (f *testSenderFactory) setStore(s *Store) {
   140  	f.store = s
   141  	if f.nonTxnSender != nil {
   142  		// monkey-patch an already created Sender, helping with test bootstrapping.
   143  		f.nonTxnSender.store = s
   144  	}
   145  }
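
        // The factory breaks a chicken-and-egg problem during test setup: the
        // store config needs a *kv.DB before the Store itself exists. A minimal
        // sketch of the wiring, mirroring createTestStoreWithoutStart below:
        //
        //	factory := &testSenderFactory{}
        //	cfg.DB = kv.NewDB(cfg.AmbientCtx, factory, cfg.Clock)
        //	store := NewStore(ctx, *cfg, eng, &roachpb.NodeDescriptor{NodeID: 1})
        //	factory.setStore(store) // patch the store into any existing sender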
   146  
   147  // testSender is an implementation of the client.TxnSender interface
   148  // which passes all requests through to a single store.
   149  type testSender struct {
   150  	store *Store
   151  }
   152  
   153  // Send forwards the call to the single store. This is a poor man's
   154  // version of kv.TxnCoordSender, but it serves the purposes of
   155  // supporting tests in this package. Transactions are not supported.
   156  // Since kv/ depends on storage/, we can't get access to a
   157  // TxnCoordSender from here.
   158  // TODO(tschottdorf): {kv->storage}.LocalSender
   159  func (db *testSender) Send(
   160  	ctx context.Context, ba roachpb.BatchRequest,
   161  ) (*roachpb.BatchResponse, *roachpb.Error) {
   162  	if et, ok := ba.GetArg(roachpb.EndTxn); ok {
   163  		return nil, roachpb.NewErrorf("%s method not supported", et.Method())
   164  	}
   165  	// Lookup range and direct request.
   166  	rs, err := keys.Range(ba.Requests)
   167  	if err != nil {
   168  		return nil, roachpb.NewError(err)
   169  	}
   170  	repl := db.store.LookupReplica(rs.Key)
   171  	if repl == nil || !repl.Desc().ContainsKeyRange(rs.Key, rs.EndKey) {
   172  		panic(fmt.Sprintf("didn't find right replica for key: %s", rs.Key))
   173  	}
   174  	ba.RangeID = repl.RangeID
   175  	repDesc, err := repl.GetReplicaDescriptor()
   176  	if err != nil {
   177  		return nil, roachpb.NewError(err)
   178  	}
   179  	ba.Replica = repDesc
   180  	br, pErr := db.store.Send(ctx, ba)
   181  	if br != nil && br.Error != nil {
   182  		panic(roachpb.ErrorUnexpectedlySet(db.store, br))
   183  	}
   184  	if pErr != nil {
   185  		return nil, pErr
   186  	}
   187  	return br, nil
   188  }
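
        // As a hedged sketch, requests normally reach this sender through the
        // *kv.DB handle wired up by testSenderFactory; e.g. a non-transactional
        // write (assuming the store was created via the helpers below):
        //
        //	if err := store.DB().Put(ctx, "a", "value"); err != nil {
        //		t.Fatal(err)
        //	}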
   189  
   190  // testStoreOpts affords control over aspects of store creation.
   191  type testStoreOpts struct {
   192  	// If createSystemRanges is not set, the store will have a single range. If
   193  	// set, the store will have all the system ranges that are generally created
   194  	// for a cluster at bootstrap.
   195  	createSystemRanges bool
   196  }
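
        // For example, tests that micro-manage replicas typically opt out of
        // the system ranges, while more end-to-end tests opt in:
        //
        //	store, _ := createTestStore(t, testStoreOpts{createSystemRanges: true}, stopper)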
   197  
   198  // createTestStoreWithoutStart creates a test store using an in-memory
   199  // engine without starting the store. It returns the store; the caller
   200  // is responsible for stopping the supplied stopper upon completion.
   201  // Note that this function populates several fields of the passed-in
   202  // cfg (Gossip, StorePool, Transport, and DB among them).
   203  func createTestStoreWithoutStart(
   204  	t testing.TB, stopper *stop.Stopper, opts testStoreOpts, cfg *StoreConfig,
   205  ) *Store {
   206  	// Setup fake zone config handler.
   207  	config.TestingSetupZoneConfigHook(stopper)
   208  
   209  	rpcContext := rpc.NewContext(
   210  		cfg.AmbientCtx, &base.Config{Insecure: true}, cfg.Clock,
   211  		stopper, cfg.Settings)
   212  	server := rpc.NewServer(rpcContext) // never started
   213  	cfg.Gossip = gossip.NewTest(1, rpcContext, server, stopper, metric.NewRegistry(), cfg.DefaultZoneConfig)
   214  	cfg.StorePool = NewTestStorePool(*cfg)
   215  	// Many tests using this test harness (as opposed to higher-level
   216  	// ones like multiTestContext or TestServer) want to micro-manage
   217  	// replicas and the background queues just get in the way. The
   218  	// scanner doesn't run frequently enough to expose races reliably,
   219  	// so just disable the scanner for all tests that use this function
   220  	// instead of figuring out exactly which tests need it.
   221  	cfg.TestingKnobs.DisableScanner = true
   222  	// The scanner affects background operations; we must also disable the split
   223  	// and merge queues separately to cover event-driven splits and merges.
   224  	cfg.TestingKnobs.DisableSplitQueue = true
   225  	cfg.TestingKnobs.DisableMergeQueue = true
   226  	eng := storage.NewDefaultInMem()
   227  	stopper.AddCloser(eng)
   228  	cfg.Transport = NewDummyRaftTransport(cfg.Settings)
   229  	factory := &testSenderFactory{}
   230  	cfg.DB = kv.NewDB(cfg.AmbientCtx, factory, cfg.Clock)
   231  	store := NewStore(context.Background(), *cfg, eng, &roachpb.NodeDescriptor{NodeID: 1})
   232  	factory.setStore(store)
   233  
   234  	require.NoError(t, WriteClusterVersion(context.Background(), eng, clusterversion.TestingClusterVersion))
   235  	if err := InitEngine(
   236  		context.Background(), eng, roachpb.StoreIdent{NodeID: 1, StoreID: 1},
   237  	); err != nil {
   238  		t.Fatal(err)
   239  	}
   240  	var splits []roachpb.RKey
   241  	kvs, tableSplits := sqlbase.MakeMetadataSchema(
   242  		keys.SystemSQLCodec, cfg.DefaultZoneConfig, cfg.DefaultSystemZoneConfig,
   243  	).GetInitialValues()
   244  	if opts.createSystemRanges {
   245  		splits = config.StaticSplits()
   246  		splits = append(splits, tableSplits...)
   247  		sort.Slice(splits, func(i, j int) bool {
   248  			return splits[i].Less(splits[j])
   249  		})
   250  	}
   251  	if err := WriteInitialClusterData(
   252  		context.Background(), eng, kvs, /* initialValues */
   253  		clusterversion.TestingBinaryVersion,
   254  		1 /* numStores */, splits, cfg.Clock.PhysicalNow(),
   255  	); err != nil {
   256  		t.Fatal(err)
   257  	}
   258  	return store
   259  }
   260  
   261  func createTestStore(
   262  	t testing.TB, opts testStoreOpts, stopper *stop.Stopper,
   263  ) (*Store, *hlc.ManualClock) {
   264  	manual := hlc.NewManualClock(123)
   265  	cfg := TestStoreConfig(hlc.NewClock(manual.UnixNano, time.Nanosecond))
   266  	store := createTestStoreWithConfig(t, stopper, opts, &cfg)
   267  	return store, manual
   268  }
   269  
   270  // createTestStoreWithConfig creates a test store using an in-memory
   271  // engine with the supplied config, starts it, and waits for it to
   272  // initialize. The caller is responsible for stopping the supplied
   273  // stopper upon completion.
   274  func createTestStoreWithConfig(
   275  	t testing.TB, stopper *stop.Stopper, opts testStoreOpts, cfg *StoreConfig,
   276  ) *Store {
   277  	store := createTestStoreWithoutStart(t, stopper, opts, cfg)
   278  	// Put an empty system config into gossip.
   279  	if err := store.Gossip().AddInfoProto(gossip.KeySystemConfig,
   280  		&config.SystemConfigEntries{}, 0); err != nil {
   281  		t.Fatal(err)
   282  	}
   283  	if err := store.Start(context.Background(), stopper); err != nil {
   284  		t.Fatal(err)
   285  	}
   286  	store.WaitForInit()
   287  	return store
   288  }
   289  
   290  // TestIterateIDPrefixKeys lays down a number of tombstones (at keys.RangeTombstoneKey) interspersed
   291  // with other irrelevant keys (both chosen randomly). It then verifies that IterateIDPrefixKeys
   292  // correctly returns only the relevant keys and values.
   293  func TestIterateIDPrefixKeys(t *testing.T) {
   294  	defer leaktest.AfterTest(t)()
   295  
   296  	ctx := context.Background()
   297  	stopper := stop.NewStopper()
   298  	defer stopper.Stop(ctx)
   299  
   300  	eng := storage.NewDefaultInMem()
   301  	stopper.AddCloser(eng)
   302  
   303  	seed := randutil.NewPseudoSeed()
   304  	// const seed = -1666367124291055473 // pin the seed to reproduce a failure
   305  	t.Logf("seed is %d", seed)
   306  	rng := rand.New(rand.NewSource(seed))
   307  
   308  	ops := []func(rangeID roachpb.RangeID) roachpb.Key{
   309  		keys.RaftAppliedIndexLegacyKey, // replicated; sorts before tombstone
   310  		keys.RaftHardStateKey,          // unreplicated; sorts after tombstone
   311  		// Replicated key-anchored local key (i.e. not one we should care about).
   312  		// Will be written at zero timestamp, but that's ok.
   313  		func(rangeID roachpb.RangeID) roachpb.Key {
   314  			return keys.RangeDescriptorKey([]byte(fmt.Sprintf("fakerange%d", rangeID)))
   315  		},
   316  		func(rangeID roachpb.RangeID) roachpb.Key {
   317  			return roachpb.Key(fmt.Sprintf("fakeuserkey%d", rangeID))
   318  		},
   319  	}
   320  
   321  	const rangeCount = 10
   322  	rangeIDFn := func() roachpb.RangeID {
   323  		return 1 + roachpb.RangeID(rng.Intn(10*rangeCount)) // spread rangeIDs out
   324  	}
   325  
   326  	// Write a number of keys that should be irrelevant to the iteration in this test.
   327  	for i := 0; i < rangeCount; i++ {
   328  		rangeID := rangeIDFn()
   329  
   330  		// Grab between one and all ops, randomly.
   331  		for _, opIdx := range rng.Perm(len(ops))[:rng.Intn(1+len(ops))] {
   332  			key := ops[opIdx](rangeID)
   333  			t.Logf("writing op=%d rangeID=%d", opIdx, rangeID)
   334  			if err := storage.MVCCPut(
   335  				ctx,
   336  				eng,
   337  				nil, /* ms */
   338  				key,
   339  				hlc.Timestamp{},
   340  				roachpb.MakeValueFromString("fake value for "+key.String()),
   341  				nil, /* txn */
   342  			); err != nil {
   343  				t.Fatal(err)
   344  			}
   345  		}
   346  	}
   347  
   348  	type seenT struct {
   349  		rangeID   roachpb.RangeID
   350  		tombstone roachpb.RangeTombstone
   351  	}
   352  
   353  	// Next, write the keys we're planning to see again.
   354  	var wanted []seenT
   355  	{
   356  		used := make(map[roachpb.RangeID]struct{})
   357  		for {
   358  			rangeID := rangeIDFn()
   359  			if _, ok := used[rangeID]; ok {
   360  				// We already wrote this key, so roll the dice again.
   361  				continue
   362  			}
   363  
   364  			tombstone := roachpb.RangeTombstone{
   365  				NextReplicaID: roachpb.ReplicaID(rng.Int31n(100)),
   366  			}
   367  
   368  			used[rangeID] = struct{}{}
   369  			wanted = append(wanted, seenT{rangeID: rangeID, tombstone: tombstone})
   370  
   371  			t.Logf("writing tombstone at rangeID=%d", rangeID)
   372  			if err := storage.MVCCPutProto(
   373  				ctx, eng, nil /* ms */, keys.RangeTombstoneKey(rangeID), hlc.Timestamp{}, nil /* txn */, &tombstone,
   374  			); err != nil {
   375  				t.Fatal(err)
   376  			}
   377  
   378  			if len(wanted) >= rangeCount {
   379  				break
   380  			}
   381  		}
   382  	}
   383  
   384  	sort.Slice(wanted, func(i, j int) bool {
   385  		return wanted[i].rangeID < wanted[j].rangeID
   386  	})
   387  
   388  	var seen []seenT
   389  	var tombstone roachpb.RangeTombstone
   390  
   391  	handleTombstone := func(rangeID roachpb.RangeID) (more bool, _ error) {
   392  		seen = append(seen, seenT{rangeID: rangeID, tombstone: tombstone})
   393  		return true, nil
   394  	}
   395  
   396  	if err := IterateIDPrefixKeys(ctx, eng, keys.RangeTombstoneKey, &tombstone, handleTombstone); err != nil {
   397  		t.Fatal(err)
   398  	}
   399  	placeholder := seenT{
   400  		rangeID: roachpb.RangeID(9999),
   401  	}
   402  
   403  	if len(wanted) != len(seen) {
   404  		t.Errorf("wanted %d results, got %d", len(wanted), len(seen))
   405  	}
   406  
   407  	for len(wanted) < len(seen) {
   408  		wanted = append(wanted, placeholder)
   409  	}
   410  	for len(seen) < len(wanted) {
   411  		seen = append(seen, placeholder)
   412  	}
   413  
   414  	if diff := pretty.Diff(wanted, seen); len(diff) > 0 {
   415  		pretty.Ldiff(t, wanted, seen)
   416  		t.Fatal("diff(wanted, seen) is nonempty")
   417  	}
   418  }
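
        // In sketch form, the callback contract exercised above: the msg
        // argument handed to IterateIDPrefixKeys is overwritten before each
        // callback, and (judging by handleTombstone) returning more=false
        // presumably stops the iteration early:
        //
        //	var tombstone roachpb.RangeTombstone
        //	_ = IterateIDPrefixKeys(ctx, eng, keys.RangeTombstoneKey, &tombstone,
        //		func(rangeID roachpb.RangeID) (bool, error) {
        //			return rangeID < 100, nil // stop once range IDs reach 100
        //		})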
   419  
   420  // TestStoreInitAndBootstrap verifies store initialization and bootstrap.
   421  func TestStoreInitAndBootstrap(t *testing.T) {
   422  	defer leaktest.AfterTest(t)()
   423  
   424  	// We need a fixed clock to avoid LastUpdateNanos drifting on us.
   425  	cfg := TestStoreConfig(hlc.NewClock(func() int64 { return 123 }, time.Nanosecond))
   426  	stopper := stop.NewStopper()
   427  	ctx := context.Background()
   428  	defer stopper.Stop(ctx)
   429  	eng := storage.NewDefaultInMem()
   430  	stopper.AddCloser(eng)
   431  	cfg.Transport = NewDummyRaftTransport(cfg.Settings)
   432  	factory := &testSenderFactory{}
   433  	cfg.DB = kv.NewDB(cfg.AmbientCtx, factory, cfg.Clock)
   434  	{
   435  		store := NewStore(ctx, cfg, eng, &roachpb.NodeDescriptor{NodeID: 1})
   436  		// Can't start as haven't bootstrapped.
   437  		if err := store.Start(ctx, stopper); err == nil {
   438  			t.Error("expected failure starting un-bootstrapped store")
   439  		}
   440  
   441  		require.NoError(t, WriteClusterVersion(context.Background(), eng, clusterversion.TestingClusterVersion))
   442  		// Bootstrap with a fake ident.
   443  		if err := InitEngine(ctx, eng, testIdent); err != nil {
   444  			t.Fatalf("error bootstrapping store: %+v", err)
   445  		}
   446  
   447  		// Verify we can read the store ident after a flush.
   448  		if err := eng.Flush(); err != nil {
   449  			t.Fatal(err)
   450  		}
   451  		if _, err := ReadStoreIdent(ctx, eng); err != nil {
   452  			t.Fatalf("unable to read store ident: %+v", err)
   453  		}
   454  
   455  		// Bootstrap the system ranges.
   456  		var splits []roachpb.RKey
   457  		kvs, tableSplits := sqlbase.MakeMetadataSchema(
   458  			keys.SystemSQLCodec, cfg.DefaultZoneConfig, cfg.DefaultSystemZoneConfig,
   459  		).GetInitialValues()
   460  		splits = config.StaticSplits()
   461  		splits = append(splits, tableSplits...)
   462  		sort.Slice(splits, func(i, j int) bool {
   463  			return splits[i].Less(splits[j])
   464  		})
   465  
   466  		if err := WriteInitialClusterData(
   467  			ctx, eng, kvs /* initialValues */, clusterversion.TestingBinaryVersion,
   468  			1 /* numStores */, splits, cfg.Clock.PhysicalNow(),
   469  		); err != nil {
   470  			t.Errorf("failure to create first range: %+v", err)
   471  		}
   472  	}
   473  
   474  	// Now, attempt to initialize a store with a now-bootstrapped range.
   475  	store := NewStore(ctx, cfg, eng, &roachpb.NodeDescriptor{NodeID: 1})
   476  	if err := store.Start(ctx, stopper); err != nil {
   477  		t.Fatalf("failure initializing bootstrapped store: %+v", err)
   478  	}
   479  
   480  	for i := 1; i <= store.ReplicaCount(); i++ {
   481  		r, err := store.GetReplica(roachpb.RangeID(i))
   482  		if err != nil {
   483  			t.Fatalf("failure fetching range %d: %+v", i, err)
   484  		}
   485  		rs := r.GetMVCCStats()
   486  
   487  		// Stats should agree with a recomputation.
   488  		now := r.store.Clock().Now()
   489  		if ms, err := rditer.ComputeStatsForRange(r.Desc(), eng, now.WallTime); err != nil {
   490  			t.Errorf("failure computing range's stats: %+v", err)
   491  		} else if ms != rs {
   492  			t.Errorf("expected range's stats to agree with recomputation: %s", pretty.Diff(ms, rs))
   493  		}
   494  	}
   495  }
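
        // The bootstrap ordering exercised above, condensed: a cluster version
        // must be written before InitEngine will accept the engine, and the
        // store can only be started once the initial cluster data exists.
        //
        //	require.NoError(t, WriteClusterVersion(ctx, eng, clusterversion.TestingClusterVersion))
        //	require.NoError(t, InitEngine(ctx, eng, testIdent))
        //	// ... WriteInitialClusterData(ctx, eng, kvs, ...) ...
        //	require.NoError(t, store.Start(ctx, stopper))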
   496  
   497  // TestInitializeEngineErrors verifies bootstrap failure if engine
   498  // is not empty.
   499  func TestInitializeEngineErrors(t *testing.T) {
   500  	defer leaktest.AfterTest(t)()
   501  	stopper := stop.NewStopper()
   502  	ctx := context.Background()
   503  	defer stopper.Stop(ctx)
   504  	eng := storage.NewDefaultInMem()
   505  	stopper.AddCloser(eng)
   506  
   507  	// Bootstrap should fail if engine has no cluster version yet.
   508  	if err := InitEngine(ctx, eng, testIdent); !testutils.IsError(err, `no cluster version`) {
   509  		t.Fatalf("unexpected error: %v", err)
   510  	}
   511  
   512  	require.NoError(t, WriteClusterVersion(ctx, eng, clusterversion.TestingClusterVersion))
   513  
   514  	// Put some random garbage into the engine.
   515  	require.NoError(t, eng.Put(storage.MakeMVCCMetadataKey(roachpb.Key("foo")), []byte("bar")))
   516  
   517  	cfg := TestStoreConfig(nil)
   518  	cfg.Transport = NewDummyRaftTransport(cfg.Settings)
   519  	store := NewStore(ctx, cfg, eng, &roachpb.NodeDescriptor{NodeID: 1})
   520  
   521  	// Can't init as haven't bootstrapped.
   522  	if err := store.Start(ctx, stopper); !errors.HasType(err, (*NotBootstrappedError)(nil)) {
   523  		t.Errorf("unexpected error initializing un-bootstrapped store: %+v", err)
   524  	}
   525  
   526  	// Bootstrap should fail on non-empty engine.
   527  	if err := InitEngine(ctx, eng, testIdent); !testutils.IsError(err, `cannot be bootstrapped`) {
   528  		t.Fatalf("unexpected error: %v", err)
   529  	}
   530  }
   531  
   532  // createReplica creates a Replica for the caller to add to the store.
   533  // Note that replicas created in this way do not have their raft groups
   534  // fully initialized, so most KV operations will not work on them. This
   535  // function is deprecated; new tests should create replicas by splitting
   536  // from a properly-bootstrapped initial range.
   537  func createReplica(s *Store, rangeID roachpb.RangeID, start, end roachpb.RKey) *Replica {
   538  	desc := &roachpb.RangeDescriptor{
   539  		RangeID:  rangeID,
   540  		StartKey: start,
   541  		EndKey:   end,
   542  		InternalReplicas: []roachpb.ReplicaDescriptor{{
   543  			NodeID:    1,
   544  			StoreID:   1,
   545  			ReplicaID: 1,
   546  		}},
   547  		NextReplicaID: 2,
   548  	}
   549  	r, err := newReplica(context.Background(), desc, s, 1)
   550  	if err != nil {
   551  		log.Fatalf(context.Background(), "%v", err)
   552  	}
   553  	return r
   554  }
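
        // A typical usage sketch for this deprecated helper, as seen in the
        // tests below: create the replica, then register it by hand:
        //
        //	repl2 := createReplica(store, 2, roachpb.RKey("a"), roachpb.RKey("b"))
        //	if err := store.AddReplica(repl2); err != nil {
        //		t.Fatal(err)
        //	}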
   555  
   556  func TestStoreAddRemoveRanges(t *testing.T) {
   557  	defer leaktest.AfterTest(t)()
   558  	stopper := stop.NewStopper()
   559  	defer stopper.Stop(context.Background())
   560  	store, _ := createTestStore(t,
   561  		testStoreOpts{
   562  			// This test was written before test stores could start with more than one
   563  			// range and was not adapted.
   564  			createSystemRanges: false,
   565  		},
   566  		stopper)
   567  	if _, err := store.GetReplica(0); err == nil {
   568  		t.Error("expected GetReplica to fail on missing range")
   569  	}
   570  	// Range 1 already exists. Make sure we can fetch it.
   571  	repl1, err := store.GetReplica(1)
   572  	if err != nil {
   573  		t.Error(err)
   574  	}
   575  	// Remove range 1.
   576  	if err := store.RemoveReplica(context.Background(), repl1, repl1.Desc().NextReplicaID, RemoveOptions{
   577  		DestroyData: true,
   578  	}); err != nil {
   579  		t.Error(err)
   580  	}
   581  	// Create a new range (id=2).
   582  	repl2 := createReplica(store, 2, roachpb.RKey("a"), roachpb.RKey("b"))
   583  	if err := store.AddReplica(repl2); err != nil {
   584  		t.Fatal(err)
   585  	}
   586  	// Try to add the same range twice.
   587  	err = store.AddReplica(repl2)
   588  	if err == nil {
   589  		t.Fatal("expected error re-adding same range")
   590  	}
   591  	// Try to remove range 1 again.
   592  	if err := store.RemoveReplica(context.Background(), repl1, repl1.Desc().NextReplicaID, RemoveOptions{
   593  		DestroyData: true,
   594  	}); err != nil {
   595  		t.Fatalf("didn't expect error re-removing same range: %v", err)
   596  	}
   597  	// Try to add a range with previously-used (but now removed) ID.
   598  	repl2Dup := createReplica(store, 1, roachpb.RKey("a"), roachpb.RKey("b"))
   599  	if err := store.AddReplica(repl2Dup); err == nil {
   600  		t.Fatal("expected error inserting a duplicated range")
   601  	}
   602  	// Add another range with different key range and then test lookup.
   603  	repl3 := createReplica(store, 3, roachpb.RKey("c"), roachpb.RKey("d"))
   604  	if err := store.AddReplica(repl3); err != nil {
   605  		t.Fatal(err)
   606  	}
   607  
   608  	testCases := []struct {
   609  		key    roachpb.RKey
   610  		expRng *Replica
   611  	}{
   612  		{roachpb.RKey("a"), repl2},
   613  		{roachpb.RKey("a\xff\xff"), repl2},
   614  		{roachpb.RKey("c"), repl3},
   615  		{roachpb.RKey("c\xff\xff"), repl3},
   616  		{roachpb.RKey("x60\xff\xff"), nil},
   617  		{roachpb.RKey("x60\xff\xff"), nil},
   618  		{roachpb.RKey("d"), nil},
   619  	}
   620  
   621  	for i, test := range testCases {
   622  		if r := store.LookupReplica(test.key); r != test.expRng {
   623  			t.Errorf("%d: expected range %v; got %v", i, test.expRng, r)
   624  		}
   625  	}
   626  }
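
        // In sketch form, the removal call used repeatedly above: the
        // NextReplicaID argument presumably guards against removing a replica
        // newer than the caller intended, and RemoveOptions.DestroyData
        // controls whether the replica's on-disk state is cleared:
        //
        //	if err := store.RemoveReplica(ctx, repl, repl.Desc().NextReplicaID,
        //		RemoveOptions{DestroyData: true},
        //	); err != nil {
        //		t.Fatal(err)
        //	}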
   627  
   628  // TestReplicasByKey tests that operations that depend on the
   629  // store.replicasByKey map function correctly when the underlying replicas'
   630  // start and end keys are manipulated in place. This mutation happens when a
   631  // snapshot is applied that advances a replica past a split.
   632  func TestReplicasByKey(t *testing.T) {
   633  	defer leaktest.AfterTest(t)()
   634  	stopper := stop.NewStopper()
   635  	defer stopper.Stop(context.Background())
   636  	store, _ := createTestStore(t,
   637  		testStoreOpts{
   638  			// This test was written before test stores could start with more than one
   639  			// range and was not adapted.
   640  			createSystemRanges: false,
   641  		},
   642  		stopper)
   643  
   644  	// Shrink the main replica.
   645  	rep, err := store.GetReplica(1)
   646  	if err != nil {
   647  		t.Fatal(err)
   648  	}
   649  
   650  	rep.mu.Lock()
   651  	desc := *rep.mu.state.Desc // shallow copy to replace desc wholesale
   652  	desc.EndKey = roachpb.RKey("e")
   653  	rep.mu.state.Desc = &desc
   654  	rep.mu.Unlock()
   655  
   656  	// Ensure that this shrinkage is recognized by future additions to replicasByKey.
   657  	reps := []*struct {
   658  		replica            *Replica
   659  		id                 int
   660  		start, end         roachpb.RKey
   661  		expectedErrorOnAdd string
   662  	}{
   663  		// [a,c) is contained in [KeyMin, e)
   664  		{nil, 2, roachpb.RKey("a"), roachpb.RKey("c"), ".*has overlapping range"},
   665  		// [c,f) partially overlaps with [KeyMin, e)
   666  		{nil, 3, roachpb.RKey("c"), roachpb.RKey("f"), ".*has overlapping range"},
   667  		// [e, f) is disjoint from [KeyMin, e)
   668  		{nil, 4, roachpb.RKey("e"), roachpb.RKey("f"), ""},
   669  	}
   670  
   671  	for i, desc := range reps {
   672  		desc.replica = createReplica(store, roachpb.RangeID(desc.id), desc.start, desc.end)
   673  		err := store.AddReplica(desc.replica)
   674  		if !testutils.IsError(err, desc.expectedErrorOnAdd) {
   675  			t.Fatalf("adding replica %d: expected err %q, but encountered %v", i, desc.expectedErrorOnAdd, err)
   676  		}
   677  	}
   678  }
   679  
   680  func TestStoreRemoveReplicaDestroy(t *testing.T) {
   681  	defer leaktest.AfterTest(t)()
   682  	stopper := stop.NewStopper()
   683  	defer stopper.Stop(context.Background())
   684  	store, _ := createTestStore(t, testStoreOpts{createSystemRanges: true}, stopper)
   685  
   686  	repl1, err := store.GetReplica(1)
   687  	if err != nil {
   688  		t.Fatal(err)
   689  	}
   690  	if err := store.RemoveReplica(context.Background(), repl1, repl1.Desc().NextReplicaID, RemoveOptions{
   691  		DestroyData: true,
   692  	}); err != nil {
   693  		t.Fatal(err)
   694  	}
   695  
   696  	// Verify that removal of a replica marks it as destroyed so that future raft
   697  	// commands on the Replica will silently be dropped.
   698  	err = repl1.withRaftGroup(true, func(r *raft.RawNode) (bool, error) {
   699  		return true, errors.Errorf("unexpectedly created a raft group")
   700  	})
   701  	require.Equal(t, errRemoved, err)
   702  
   703  	repl1.mu.RLock()
   704  	expErr := repl1.mu.destroyStatus.err
   705  	repl1.mu.RUnlock()
   706  
   707  	if expErr == nil {
   708  		t.Fatal("replica was not marked as destroyed")
   709  	}
   710  
   711  	st := &kvserverpb.LeaseStatus{Timestamp: repl1.Clock().Now()}
   712  	if err = repl1.checkExecutionCanProceed(&roachpb.BatchRequest{}, nil /* g */, st); !errors.Is(err, expErr) {
   713  		t.Fatalf("expected error %s, but got %v", expErr, err)
   714  	}
   715  }
   716  
   717  func TestStoreReplicaVisitor(t *testing.T) {
   718  	defer leaktest.AfterTest(t)()
   719  	stopper := stop.NewStopper()
   720  	defer stopper.Stop(context.Background())
   721  	store, _ := createTestStore(t,
   722  		testStoreOpts{
   723  			// This test was written before test stores could start with more than one
   724  			// range and was not adapted.
   725  			createSystemRanges: false,
   726  		},
   727  		stopper)
   728  
   729  	// Remove range 1.
   730  	repl1, err := store.GetReplica(1)
   731  	if err != nil {
   732  		t.Error(err)
   733  	}
   734  	if err := store.RemoveReplica(context.Background(), repl1, repl1.Desc().NextReplicaID, RemoveOptions{
   735  		DestroyData: true,
   736  	}); err != nil {
   737  		t.Error(err)
   738  	}
   739  
   740  	// Add 10 new ranges.
   741  	const newCount = 10
   742  	for i := 0; i < newCount; i++ {
   743  		repl := createReplica(store, roachpb.RangeID(i+1), roachpb.RKey(fmt.Sprintf("a%02d", i)), roachpb.RKey(fmt.Sprintf("a%02d", i+1)))
   744  		if err := store.AddReplica(repl); err != nil {
   745  			t.Fatal(err)
   746  		}
   747  	}
   748  
   749  	// Verify two passes of the visit, the second one in-order.
   750  	visitor := newStoreReplicaVisitor(store)
   751  	exp := make(map[roachpb.RangeID]struct{})
   752  	for i := 0; i < newCount; i++ {
   753  		exp[roachpb.RangeID(i+1)] = struct{}{}
   754  	}
   755  
   756  	for pass := 0; pass < 2; pass++ {
   757  		if ec := visitor.EstimatedCount(); ec != 10 {
   758  			t.Fatalf("expected 10 remaining; got %d", ec)
   759  		}
   760  		i := 1
   761  		seen := make(map[roachpb.RangeID]struct{})
   762  
   763  		// Ensure that our next pass is done in-order.
   764  		if pass == 1 {
   765  			_ = visitor.InOrder()
   766  		}
   767  		var lastRangeID roachpb.RangeID
   768  		visitor.Visit(func(repl *Replica) bool {
   769  			if pass == 1 {
   770  				if repl.RangeID <= lastRangeID {
   771  					t.Fatalf("on second pass, expect ranges to be visited in ascending range ID order; %d !> %d", repl.RangeID, lastRangeID)
   772  				}
   773  				lastRangeID = repl.RangeID
   774  			}
   775  			_, ok := seen[repl.RangeID]
   776  			if ok {
   777  				t.Fatalf("already saw %d", repl.RangeID)
   778  			}
   779  
   780  			seen[repl.RangeID] = struct{}{}
   781  			if ec := visitor.EstimatedCount(); ec != 10-i {
   782  				t.Fatalf(
   783  					"expected %d remaining; got %d after seeing %+v",
   784  					10-i, ec, seen,
   785  				)
   786  			}
   787  			i++
   788  			return true
   789  		})
   790  		if ec := visitor.EstimatedCount(); ec != 10 {
   791  			t.Fatalf("expected 10 remaining; got %d", ec)
   792  		}
   793  		if !reflect.DeepEqual(exp, seen) {
   794  			t.Fatalf("got %v, expected %v", seen, exp)
   795  		}
   796  	}
   797  }
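
        // A hedged sketch of the visitor API as exercised above: Visit invokes
        // the closure for each replica until it returns false, EstimatedCount
        // reports how many replicas remain, and (judging by the discarded
        // return value above) InOrder appears to return the visitor itself:
        //
        //	v := newStoreReplicaVisitor(store)
        //	_ = v.InOrder()
        //	v.Visit(func(repl *Replica) bool {
        //		return repl.RangeID < 5 // stop early at range ID 5
        //	})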
   798  
   799  func TestHasOverlappingReplica(t *testing.T) {
   800  	defer leaktest.AfterTest(t)()
   801  	stopper := stop.NewStopper()
   802  	defer stopper.Stop(context.Background())
   803  	store, _ := createTestStore(t,
   804  		testStoreOpts{
   805  			// This test was written before test stores could start with more than one
   806  			// range and was not adapted.
   807  			createSystemRanges: false,
   808  		},
   809  		stopper)
   810  	if _, err := store.GetReplica(0); err == nil {
   811  		t.Error("expected GetReplica to fail on missing range")
   812  	}
   813  	// Range 1 already exists. Make sure we can fetch it.
   814  	repl1, err := store.GetReplica(1)
   815  	if err != nil {
   816  		t.Error(err)
   817  	}
   818  	// Remove range 1.
   819  	if err := store.RemoveReplica(context.Background(), repl1, repl1.Desc().NextReplicaID, RemoveOptions{
   820  		DestroyData: true,
   821  	}); err != nil {
   822  		t.Error(err)
   823  	}
   824  
   825  	// Create ranges.
   826  	rngDescs := []struct {
   827  		id         int
   828  		start, end roachpb.RKey
   829  	}{
   830  		{2, roachpb.RKey("b"), roachpb.RKey("c")},
   831  		{3, roachpb.RKey("c"), roachpb.RKey("d")},
   832  		{4, roachpb.RKey("d"), roachpb.RKey("f")},
   833  	}
   834  
   835  	for _, desc := range rngDescs {
   836  		repl := createReplica(store, roachpb.RangeID(desc.id), desc.start, desc.end)
   837  		if err := store.AddReplica(repl); err != nil {
   838  			t.Fatal(err)
   839  		}
   840  	}
   841  
   842  	testCases := []struct {
   843  		start, end roachpb.RKey
   844  		exp        bool
   845  	}{
   846  		{roachpb.RKey("a"), roachpb.RKey("c"), true},
   847  		{roachpb.RKey("b"), roachpb.RKey("c"), true},
   848  		{roachpb.RKey("b"), roachpb.RKey("d"), true},
   849  		{roachpb.RKey("d"), roachpb.RKey("e"), true},
   850  		{roachpb.RKey("d"), roachpb.RKey("g"), true},
   851  		{roachpb.RKey("e"), roachpb.RKey("e\x00"), true},
   852  
   853  		{roachpb.RKey("f"), roachpb.RKey("g"), false},
   854  		{roachpb.RKey("a"), roachpb.RKey("b"), false},
   855  	}
   856  
   857  	for i, test := range testCases {
   858  		rngDesc := &roachpb.RangeDescriptor{StartKey: test.start, EndKey: test.end}
   859  		if r := store.getOverlappingKeyRangeLocked(rngDesc) != nil; r != test.exp {
   860  			t.Errorf("%d: expected overlap %v; got %v", i, test.exp, r)
   861  		}
   862  	}
   863  }
   864  
   865  func TestLookupPrecedingReplica(t *testing.T) {
   866  	defer leaktest.AfterTest(t)()
   867  
   868  	ctx := context.Background()
   869  	stopper := stop.NewStopper()
   870  	defer stopper.Stop(ctx)
   871  	store, _ := createTestStore(t,
   872  		testStoreOpts{
   873  			// This test was written before test stores could start with more than one
   874  			// range and was not adapted.
   875  			createSystemRanges: false,
   876  		},
   877  		stopper)
   878  
   879  	// Clobber the existing range so we can test ranges that aren't KeyMin or
   880  	// KeyMax.
   881  	repl1, err := store.GetReplica(1)
   882  	if err != nil {
   883  		t.Fatal(err)
   884  	}
   885  	if err := store.RemoveReplica(ctx, repl1, repl1.Desc().NextReplicaID, RemoveOptions{
   886  		DestroyData: true,
   887  	}); err != nil {
   888  		t.Fatal(err)
   889  	}
   890  
   891  	repl2 := createReplica(store, 2, roachpb.RKey("a"), roachpb.RKey("b"))
   892  	if err := store.AddReplica(repl2); err != nil {
   893  		t.Fatal(err)
   894  	}
   895  	repl3 := createReplica(store, 3, roachpb.RKey("b"), roachpb.RKey("c"))
   896  	if err := store.AddReplica(repl3); err != nil {
   897  		t.Fatal(err)
   898  	}
   899  	if err := store.addPlaceholder(&ReplicaPlaceholder{rangeDesc: roachpb.RangeDescriptor{
   900  		RangeID: 4, StartKey: roachpb.RKey("c"), EndKey: roachpb.RKey("d"),
   901  	}}); err != nil {
   902  		t.Fatal(err)
   903  	}
   904  	repl5 := createReplica(store, 5, roachpb.RKey("e"), roachpb.RKey("f"))
   905  	if err := store.AddReplica(repl5); err != nil {
   906  		t.Fatal(err)
   907  	}
   908  
   909  	for i, tc := range []struct {
   910  		key     roachpb.RKey
   911  		expRepl *Replica
   912  	}{
   913  		{roachpb.RKeyMin, nil},
   914  		{roachpb.RKey("a"), nil},
   915  		{roachpb.RKey("aa"), nil},
   916  		{roachpb.RKey("b"), repl2},
   917  		{roachpb.RKey("bb"), repl2},
   918  		{roachpb.RKey("c"), repl3},
   919  		{roachpb.RKey("cc"), repl3},
   920  		{roachpb.RKey("d"), repl3},
   921  		{roachpb.RKey("dd"), repl3},
   922  		{roachpb.RKey("e"), repl3},
   923  		{roachpb.RKey("ee"), repl3},
   924  		{roachpb.RKey("f"), repl5},
   925  		{roachpb.RKeyMax, repl5},
   926  	} {
   927  		if repl := store.lookupPrecedingReplica(tc.key); repl != tc.expRepl {
   928  			t.Errorf("%d: expected replica %v; got %v", i, tc.expRepl, repl)
   929  		}
   930  	}
   931  }
   932  
   933  func TestMaybeMarkReplicaInitialized(t *testing.T) {
   934  	defer leaktest.AfterTest(t)()
   935  	stopper := stop.NewStopper()
   936  	defer stopper.Stop(context.Background())
   937  	store, _ := createTestStore(t,
   938  		testStoreOpts{
   939  			// This test was written before test stores could start with more than one
   940  			// range and was not adapted.
   941  			createSystemRanges: false,
   942  		},
   943  		stopper)
   944  
   945  	// Clobber the existing range so we can test overlaps that aren't KeyMin or KeyMax.
   946  	repl1, err := store.GetReplica(1)
   947  	if err != nil {
   948  		t.Error(err)
   949  	}
   950  	if err := store.RemoveReplica(context.Background(), repl1, repl1.Desc().NextReplicaID, RemoveOptions{
   951  		DestroyData: true,
   952  	}); err != nil {
   953  		t.Error(err)
   954  	}
   955  
   956  	repl := createReplica(store, roachpb.RangeID(2), roachpb.RKey("a"), roachpb.RKey("c"))
   957  	if err := store.AddReplica(repl); err != nil {
   958  		t.Fatal(err)
   959  	}
   960  
   961  	newRangeID := roachpb.RangeID(3)
   962  	desc := &roachpb.RangeDescriptor{
   963  		RangeID: newRangeID,
   964  	}
   965  
   966  	r, err := newReplica(context.Background(), desc, store, 1)
   967  	if err != nil {
   968  		t.Fatal(err)
   969  	}
   970  
   971  	store.mu.Lock()
   972  	defer store.mu.Unlock()
   973  
   974  	expectedResult := "attempted to process uninitialized range.*"
   975  	ctx := r.AnnotateCtx(context.Background())
   976  	if err := store.maybeMarkReplicaInitializedLocked(ctx, r); !testutils.IsError(err, expectedResult) {
   977  		t.Errorf("expected maybeMarkReplicaInitializedLocked with uninitialized replica to fail, got %v", err)
   978  	}
   979  
   980  	// Initialize the range with start and end keys.
   981  	desc = protoutil.Clone(desc).(*roachpb.RangeDescriptor)
   982  	desc.StartKey = roachpb.RKey("b")
   983  	desc.EndKey = roachpb.RKey("d")
   984  	desc.InternalReplicas = []roachpb.ReplicaDescriptor{{
   985  		NodeID:    1,
   986  		StoreID:   1,
   987  		ReplicaID: 1,
   988  	}}
   989  	desc.NextReplicaID = 2
   990  	r.setDescRaftMuLocked(ctx, desc)
   991  	if err := store.maybeMarkReplicaInitializedLocked(ctx, r); err != nil {
   992  		t.Errorf("expected maybeMarkReplicaInitializedLocked on a replica that's not in the uninit map to silently succeed, got %v", err)
   993  	}
   994  
   995  	store.mu.uninitReplicas[newRangeID] = r
   996  
   997  	expectedResult = ".*cannot initialize replica.*"
   998  	if err := store.maybeMarkReplicaInitializedLocked(ctx, r); !testutils.IsError(err, expectedResult) {
   999  		t.Errorf("expected maybeMarkReplicaInitializedLocked with overlapping keys to fail, got %v", err)
  1000  	}
  1001  }
  1002  
  1003  // TestStoreSend verifies straightforward command execution
  1004  // of both a read-only and a read-write command.
  1005  func TestStoreSend(t *testing.T) {
  1006  	defer leaktest.AfterTest(t)()
  1007  	stopper := stop.NewStopper()
  1008  	defer stopper.Stop(context.Background())
  1009  	store, _ := createTestStore(t, testStoreOpts{createSystemRanges: true}, stopper)
  1010  	gArgs := getArgs([]byte("a"))
  1011  
  1012  	// Try a successful get request.
  1013  	if _, pErr := kv.SendWrapped(context.Background(), store.TestSender(), &gArgs); pErr != nil {
  1014  		t.Fatal(pErr)
  1015  	}
  1016  	pArgs := putArgs([]byte("a"), []byte("aaa"))
  1017  	if _, pErr := kv.SendWrapped(context.Background(), store.TestSender(), &pArgs); pErr != nil {
  1018  		t.Fatal(pErr)
  1019  	}
  1020  }
  1021  
  1022  // TestStoreObservedTimestamp verifies that execution of a transactional
  1023  // command on a Store always returns a timestamp observation, either per the
  1024  // error's or the response's transaction, as well as an originating NodeID.
  1025  func TestStoreObservedTimestamp(t *testing.T) {
  1026  	defer leaktest.AfterTest(t)()
  1027  	badKey := []byte("a")
  1028  	goodKey := []byte("b")
  1029  	desc := roachpb.ReplicaDescriptor{
  1030  		NodeID: 5,
  1031  		// not relevant
  1032  		StoreID:   1,
  1033  		ReplicaID: 2,
  1034  	}
  1035  
  1036  	testCases := []struct {
  1037  		key   roachpb.Key
  1038  		check func(int64, roachpb.Response, *roachpb.Error)
  1039  	}{
  1040  		{badKey,
  1041  			func(wallNanos int64, _ roachpb.Response, pErr *roachpb.Error) {
  1042  				if pErr == nil {
  1043  					t.Fatal("expected an error")
  1044  				}
  1045  				txn := pErr.GetTxn()
  1046  				if txn == nil || txn.ID == (uuid.UUID{}) {
  1047  					t.Fatalf("expected nontrivial transaction in %s", pErr)
  1048  				}
  1049  				if ts, _ := txn.GetObservedTimestamp(desc.NodeID); ts.WallTime != wallNanos {
  1050  					t.Fatalf("unexpected observed timestamps, expected %d->%d but got map %+v",
  1051  						desc.NodeID, wallNanos, txn.ObservedTimestamps)
  1052  				}
  1053  				if pErr.OriginNode != desc.NodeID {
  1054  					t.Fatalf("unexpected OriginNode %d, expected %d",
  1055  						pErr.OriginNode, desc.NodeID)
  1056  				}
  1057  
  1058  			}},
  1059  		{goodKey,
  1060  			func(wallNanos int64, pReply roachpb.Response, pErr *roachpb.Error) {
  1061  				if pErr != nil {
  1062  					t.Fatal(pErr)
  1063  				}
  1064  				txn := pReply.Header().Txn
  1065  				if txn == nil || txn.ID == (uuid.UUID{}) {
  1066  					t.Fatal("expected transactional response")
  1067  				}
  1068  				obs, _ := txn.GetObservedTimestamp(desc.NodeID)
  1069  				if act, exp := obs.WallTime, wallNanos; exp != act {
  1070  					t.Fatalf("unexpected observed wall time: %d, wanted %d", act, exp)
  1071  				}
  1072  			}},
  1073  	}
  1074  
  1075  	for _, test := range testCases {
  1076  		func() {
  1077  			manual := hlc.NewManualClock(123)
  1078  			cfg := TestStoreConfig(hlc.NewClock(manual.UnixNano, time.Nanosecond))
  1079  			cfg.TestingKnobs.EvalKnobs.TestingEvalFilter =
  1080  				func(filterArgs kvserverbase.FilterArgs) *roachpb.Error {
  1081  					if bytes.Equal(filterArgs.Req.Header().Key, badKey) {
  1082  						return roachpb.NewError(errors.Errorf("boom"))
  1083  					}
  1084  					return nil
  1085  				}
  1086  			stopper := stop.NewStopper()
  1087  			defer stopper.Stop(context.Background())
  1088  			store := createTestStoreWithConfig(t, stopper, testStoreOpts{createSystemRanges: true}, &cfg)
  1089  			txn := newTransaction("test", test.key, 1, store.cfg.Clock)
  1090  			txn.MaxTimestamp = hlc.MaxTimestamp
  1091  			pArgs := putArgs(test.key, []byte("value"))
  1092  			h := roachpb.Header{
  1093  				Txn:     txn,
  1094  				Replica: desc,
  1095  			}
  1096  			assignSeqNumsForReqs(txn, &pArgs)
  1097  			pReply, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), h, &pArgs)
  1098  			test.check(manual.UnixNano(), pReply, pErr)
  1099  		}()
  1100  	}
  1101  }
  1102  
  1103  // TestStoreAnnotateNow verifies that the Store sets Now on the batch responses.
  1104  func TestStoreAnnotateNow(t *testing.T) {
  1105  	defer leaktest.AfterTest(t)()
  1106  	ctx := context.Background()
  1107  	badKey := []byte("a")
  1108  	goodKey := []byte("b")
  1109  	desc := roachpb.ReplicaDescriptor{
  1110  		NodeID: 5,
  1111  		// not relevant
  1112  		StoreID:   1,
  1113  		ReplicaID: 2,
  1114  	}
  1115  
  1116  	testCases := []struct {
  1117  		key   roachpb.Key
  1118  		check func(*roachpb.BatchResponse, *roachpb.Error)
  1119  	}{
  1120  		{badKey,
  1121  			func(_ *roachpb.BatchResponse, pErr *roachpb.Error) {
  1122  				if pErr == nil {
  1123  					t.Fatal("expected an error")
  1124  				}
  1125  				if pErr.Now == (hlc.Timestamp{}) {
  1126  					t.Fatal("timestamp not annotated on error")
  1127  				}
  1128  			}},
  1129  		{goodKey,
  1130  			func(pReply *roachpb.BatchResponse, pErr *roachpb.Error) {
  1131  				if pErr != nil {
  1132  					t.Fatal(pErr)
  1133  				}
  1134  				if pReply.Now == (hlc.Timestamp{}) {
  1135  					t.Fatal("timestamp not annotated on batch response")
  1136  				}
  1137  			}},
  1138  	}
  1139  
  1140  	testutils.RunTrueAndFalse(t, "useTxn", func(t *testing.T, useTxn bool) {
  1141  		for _, test := range testCases {
  1142  			t.Run(test.key.String(), func(t *testing.T) {
  1143  				cfg := TestStoreConfig(nil)
  1144  				cfg.TestingKnobs.EvalKnobs.TestingEvalFilter =
  1145  					func(filterArgs kvserverbase.FilterArgs) *roachpb.Error {
  1146  						if bytes.Equal(filterArgs.Req.Header().Key, badKey) {
  1147  							return roachpb.NewErrorWithTxn(errors.Errorf("boom"), filterArgs.Hdr.Txn)
  1148  						}
  1149  						return nil
  1150  					}
  1151  				stopper := stop.NewStopper()
  1152  				defer stopper.Stop(ctx)
  1153  				store := createTestStoreWithConfig(t, stopper, testStoreOpts{createSystemRanges: true}, &cfg)
  1154  				var txn *roachpb.Transaction
  1155  				pArgs := putArgs(test.key, []byte("value"))
  1156  				if useTxn {
  1157  					txn = newTransaction("test", test.key, 1, store.cfg.Clock)
  1158  					txn.MaxTimestamp = hlc.MaxTimestamp
  1159  					assignSeqNumsForReqs(txn, &pArgs)
  1160  				}
  1161  				ba := roachpb.BatchRequest{
  1162  					Header: roachpb.Header{
  1163  						Txn:     txn,
  1164  						Replica: desc,
  1165  					},
  1166  				}
  1167  				ba.Add(&pArgs)
  1168  
  1169  				test.check(store.TestSender().Send(ctx, ba))
  1170  			})
  1171  		}
  1172  	})
  1173  }
  1174  
  1175  // TestStoreVerifyKeys checks that key length is enforced and
  1176  // that end keys must sort >= start.
  1177  func TestStoreVerifyKeys(t *testing.T) {
  1178  	defer leaktest.AfterTest(t)()
  1179  	stopper := stop.NewStopper()
  1180  	defer stopper.Stop(context.Background())
  1181  	store, _ := createTestStore(t, testStoreOpts{createSystemRanges: true}, stopper)
  1182  	// Try a start key == KeyMax.
  1183  	gArgs := getArgs(roachpb.KeyMax)
  1184  	if _, pErr := kv.SendWrapped(context.Background(), store.TestSender(), &gArgs); !testutils.IsPError(pErr, "must be less than KeyMax") {
  1185  		t.Fatalf("expected error for start key == KeyMax: %v", pErr)
  1186  	}
  1187  	// Try a get with an end key specified (get requires only a start key and should fail).
  1188  	gArgs.EndKey = roachpb.KeyMax
  1189  	if _, pErr := kv.SendWrapped(context.Background(), store.TestSender(), &gArgs); !testutils.IsPError(pErr, "must be less than KeyMax") {
  1190  		t.Fatalf("unexpected error for end key specified on a non-range-based operation: %v", pErr)
  1191  	}
  1192  	// Try a scan with end key < start key.
  1193  	sArgs := scanArgs([]byte("b"), []byte("a"))
  1194  	if _, pErr := kv.SendWrapped(context.Background(), store.TestSender(), sArgs); !testutils.IsPError(pErr, "must be greater than") {
  1195  		t.Fatalf("unexpected error for end key < start: %v", pErr)
  1196  	}
  1197  	// Try a scan with start key == end key.
  1198  	sArgs.Key = []byte("a")
  1199  	sArgs.EndKey = sArgs.Key
  1200  	if _, pErr := kv.SendWrapped(context.Background(), store.TestSender(), sArgs); !testutils.IsPError(pErr, "must be greater than") {
  1201  		t.Fatalf("unexpected error for start == end key: %v", pErr)
  1202  	}
  1203  	// Try a scan with range-local start key, but "regular" end key.
  1204  	sArgs.Key = keys.MakeRangeKey([]byte("test"), []byte("sffx"), nil)
  1205  	sArgs.EndKey = []byte("z")
  1206  	if _, pErr := kv.SendWrapped(context.Background(), store.TestSender(), sArgs); !testutils.IsPError(pErr, "range-local") {
  1207  		t.Fatalf("unexpected error for local start, non-local end key: %v", pErr)
  1208  	}
  1209  
  1210  	// Try a put to meta2 key which would otherwise exceed maximum key
  1211  	// length, but is accepted because of the meta prefix.
  1212  	meta2KeyMax := testutils.MakeKey(keys.Meta2Prefix, roachpb.RKeyMax)
  1213  	pArgs := putArgs(meta2KeyMax, []byte("value"))
  1214  	if _, pErr := kv.SendWrapped(context.Background(), store.TestSender(), &pArgs); pErr != nil {
  1215  		t.Fatalf("unexpected error on put to meta2 value: %s", pErr)
  1216  	}
  1217  	// Try to put a range descriptor record for a start key which is
  1218  	// maximum length.
  1219  	key := append([]byte{}, roachpb.RKeyMax...)
  1220  	key[len(key)-1] = 0x01
  1221  	pArgs = putArgs(keys.RangeDescriptorKey(key), []byte("value"))
  1222  	if _, pErr := kv.SendWrapped(context.Background(), store.TestSender(), &pArgs); pErr != nil {
  1223  		t.Fatalf("unexpected error on put to range descriptor for KeyMax value: %s", pErr)
  1224  	}
  1225  	// Try a put to txn record for a meta2 key (note that this doesn't
  1226  	// actually happen in practice, as txn records are not put directly,
  1227  	// but are instead manipulated only through txn methods).
  1228  	pArgs = putArgs(keys.TransactionKey(meta2KeyMax, uuid.MakeV4()), []byte("value"))
  1229  	if _, pErr := kv.SendWrapped(context.Background(), store.TestSender(), &pArgs); pErr != nil {
  1230  		t.Fatalf("unexpected error on put to txn meta2 value: %s", pErr)
  1231  	}
  1232  }
  1233  
  1234  // TestStoreSendUpdateTime verifies that the node clock is updated.
  1235  func TestStoreSendUpdateTime(t *testing.T) {
  1236  	defer leaktest.AfterTest(t)()
  1237  	stopper := stop.NewStopper()
  1238  	defer stopper.Stop(context.Background())
  1239  	store, _ := createTestStore(t, testStoreOpts{createSystemRanges: true}, stopper)
  1240  	args := getArgs([]byte("a"))
  1241  	reqTS := store.cfg.Clock.Now().Add(store.cfg.Clock.MaxOffset().Nanoseconds(), 0)
  1242  	_, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{Timestamp: reqTS}, &args)
  1243  	if pErr != nil {
  1244  		t.Fatal(pErr)
  1245  	}
  1246  	ts := store.cfg.Clock.Now()
  1247  	if ts.WallTime != reqTS.WallTime || ts.Logical <= reqTS.Logical {
  1248  		t.Errorf("expected store clock to advance to %s; got %s", reqTS, ts)
  1249  	}
  1250  }
  1251  
  1252  // TestStoreSendWithZeroTime verifies that a command sent with no
  1253  // timestamp is assigned the node's wall time.
  1254  func TestStoreSendWithZeroTime(t *testing.T) {
  1255  	defer leaktest.AfterTest(t)()
  1256  	stopper := stop.NewStopper()
  1257  	defer stopper.Stop(context.Background())
  1258  	store, _ := createTestStore(t, testStoreOpts{createSystemRanges: true}, stopper)
  1259  	args := getArgs([]byte("a"))
  1260  
  1261  	var ba roachpb.BatchRequest
  1262  	ba.Add(&args)
  1263  	br, pErr := store.TestSender().Send(context.Background(), ba)
  1264  	if pErr != nil {
  1265  		t.Fatal(pErr)
  1266  	}
  1267  	// The Logical time will increase over the course of the command
  1268  	// execution so we can only rely on comparing the WallTime.
  1269  	if br.Timestamp.WallTime != store.cfg.Clock.Now().WallTime {
  1270  		t.Errorf("expected reply to have store clock time %s; got %s",
  1271  			store.cfg.Clock.Now(), br.Timestamp)
  1272  	}
  1273  }
  1274  
  1275  // TestStoreSendWithClockOffset verifies that if the request
  1276  // specifies a timestamp further into the future than the node's
  1277  // maximum allowed clock offset, the cmd fails.
  1278  func TestStoreSendWithClockOffset(t *testing.T) {
  1279  	defer leaktest.AfterTest(t)()
  1280  	stopper := stop.NewStopper()
  1281  	defer stopper.Stop(context.Background())
  1282  	store, _ := createTestStore(t, testStoreOpts{createSystemRanges: true}, stopper)
  1283  	args := getArgs([]byte("a"))
  1284  	// Set args timestamp to exceed max offset.
  1285  	reqTS := store.cfg.Clock.Now().Add(store.cfg.Clock.MaxOffset().Nanoseconds()+1, 0)
  1286  	_, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{Timestamp: reqTS}, &args)
  1287  	if !testutils.IsPError(pErr, "remote wall time is too far ahead") {
  1288  		t.Errorf("unexpected error: %v", pErr)
  1289  	}
  1290  }
  1291  
  1292  // TestStoreSendBadRange passes a bad range.
  1293  func TestStoreSendBadRange(t *testing.T) {
  1294  	defer leaktest.AfterTest(t)()
  1295  	stopper := stop.NewStopper()
  1296  	defer stopper.Stop(context.Background())
  1297  	store, _ := createTestStore(t, testStoreOpts{createSystemRanges: true}, stopper)
  1298  	args := getArgs([]byte("0"))
  1299  	if _, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{
  1300  		RangeID: 2, // no such range
  1301  	}, &args); pErr == nil {
  1302  		t.Error("expected invalid range")
  1303  	}
  1304  }
  1305  
  1306  // splitTestRange splits a range. This does *not* fully emulate a real split
  1307  // and should not be used in new tests. Tests that need splits should either live in
  1308  // client_split_test.go and use AdminSplit instead of this function or use the
  1309  // TestServerInterface.
  1310  // See #702
  1311  // TODO(bdarnell): convert tests that use this function to use AdminSplit instead.
  1312  func splitTestRange(store *Store, key, splitKey roachpb.RKey, t *testing.T) *Replica {
  1313  	ctx := context.Background()
  1314  	repl := store.LookupReplica(key)
  1315  	require.NotNil(t, repl)
  1316  	rangeID, err := store.AllocateRangeID(ctx)
  1317  	require.NoError(t, err)
  1318  	rhsDesc := roachpb.NewRangeDescriptor(
  1319  		rangeID, splitKey, repl.Desc().EndKey, repl.Desc().Replicas())
  1320  	// Minimal amount of work to keep this deprecated machinery working: Write
  1321  	// some required Raft keys.
  1322  	_, err = stateloader.WriteInitialState(
  1323  		ctx, store.engine, enginepb.MVCCStats{}, *rhsDesc, roachpb.Lease{},
  1324  		hlc.Timestamp{}, stateloader.TruncatedStateUnreplicated,
  1325  	)
  1326  	require.NoError(t, err)
  1327  	newRng, err := newReplica(ctx, rhsDesc, store, repl.ReplicaID())
  1328  	require.NoError(t, err)
  1329  	newLeftDesc := *repl.Desc()
  1330  	newLeftDesc.EndKey = splitKey
  1331  	err = store.SplitRange(repl.AnnotateCtx(context.Background()), repl, newRng, &roachpb.SplitTrigger{
  1332  		RightDesc: *rhsDesc,
  1333  		LeftDesc:  newLeftDesc,
  1334  	})
  1335  	require.NoError(t, err)
  1336  	return newRng
  1337  }
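
        // A hedged usage sketch, mirroring TestStoreSendOutOfRange below:
        // split the initial range at "b" and address a request to the new
        // right-hand side:
        //
        //	repl2 := splitTestRange(store, roachpb.RKeyMin, roachpb.RKey("b"), t)
        //	_, pErr := kv.SendWrappedWith(ctx, store.TestSender(),
        //		roachpb.Header{RangeID: repl2.RangeID}, &args)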
  1338  
  1339  // TestStoreSendOutOfRange passes a key not contained
  1340  // within the range's key range.
  1341  func TestStoreSendOutOfRange(t *testing.T) {
  1342  	defer leaktest.AfterTest(t)()
  1343  	stopper := stop.NewStopper()
  1344  	defer stopper.Stop(context.Background())
  1345  	store, _ := createTestStore(t, testStoreOpts{createSystemRanges: true}, stopper)
  1346  
  1347  	repl2 := splitTestRange(store, roachpb.RKeyMin, roachpb.RKey("b"), t)
  1348  
  1349  	// Range 1 is from KeyMin to "b", so reading "b" from range 1 should
  1350  	// fail because it's just after the range boundary.
  1351  	args := getArgs([]byte("b"))
  1352  	if _, err := kv.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{
  1353  		RangeID: 1,
  1354  	}, &args); err == nil {
  1355  		t.Error("expected key to be out of range")
  1356  	}
  1357  
  1358  	// Range 2 is from "b" to KeyMax, so reading "a" from range 2 should
  1359  	// fail because it's before the start of the range.
  1360  	args = getArgs([]byte("a"))
  1361  	if _, err := kv.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{
  1362  		RangeID: repl2.RangeID,
  1363  	}, &args); err == nil {
  1364  		t.Error("expected key to be out of range")
  1365  	}
  1366  }
  1367  
  1368  // TestStoreRangeIDAllocation verifies that range IDs are
  1369  // allocated in successive blocks.
  1370  func TestStoreRangeIDAllocation(t *testing.T) {
  1371  	defer leaktest.AfterTest(t)()
  1372  
  1373  	ctx := context.Background()
  1374  	stopper := stop.NewStopper()
  1375  	defer stopper.Stop(ctx)
  1376  	store, _ := createTestStore(t,
  1377  		testStoreOpts{
  1378  			// This test was written before test stores could start with more than one
  1379  			// range and was not adapted.
  1380  			createSystemRanges: false,
  1381  		},
  1382  		stopper)
  1383  
  1384  	// Range IDs should be allocated from ID 2 (first allocated range)
  1385  	// to rangeIDAllocCount * 3 + 1.
  1386  	for i := 0; i < rangeIDAllocCount*3; i++ {
  1387  		rangeID, err := store.AllocateRangeID(ctx)
  1388  		require.NoError(t, err)
  1389  		require.EqualValues(t, 2+i, rangeID)
  1390  	}
  1391  }
  1392  
// TestStoreReplicasByKey verifies that we can look up replicas by key
// using the sorted replicasByKey slice.
  1395  func TestStoreReplicasByKey(t *testing.T) {
  1396  	defer leaktest.AfterTest(t)()
  1397  	stopper := stop.NewStopper()
  1398  	defer stopper.Stop(context.Background())
  1399  	store, _ := createTestStore(t,
  1400  		testStoreOpts{
  1401  			// This test was written before test stores could start with more than one
  1402  			// range and was not adapted.
  1403  			createSystemRanges: false,
  1404  		},
  1405  		stopper)
  1406  
  1407  	r0 := store.LookupReplica(roachpb.RKeyMin)
  1408  	r1 := splitTestRange(store, roachpb.RKeyMin, roachpb.RKey("A"), t)
  1409  	r2 := splitTestRange(store, roachpb.RKey("A"), roachpb.RKey("C"), t)
  1410  	r3 := splitTestRange(store, roachpb.RKey("C"), roachpb.RKey("X"), t)
  1411  	r4 := splitTestRange(store, roachpb.RKey("X"), roachpb.RKey("ZZ"), t)
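	// The resulting keyspace: r0 [KeyMin,"A"), r1 ["A","C"),
	// r2 ["C","X"), r3 ["X","ZZ"), r4 ["ZZ",KeyMax).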
  1412  
  1413  	if r := store.LookupReplica(roachpb.RKey("0")); r != r0 {
  1414  		t.Errorf("mismatched replica %s != %s", r, r0)
  1415  	}
  1416  	if r := store.LookupReplica(roachpb.RKey("B")); r != r1 {
  1417  		t.Errorf("mismatched replica %s != %s", r, r1)
  1418  	}
  1419  	if r := store.LookupReplica(roachpb.RKey("C")); r != r2 {
  1420  		t.Errorf("mismatched replica %s != %s", r, r2)
  1421  	}
  1422  	if r := store.LookupReplica(roachpb.RKey("M")); r != r2 {
  1423  		t.Errorf("mismatched replica %s != %s", r, r2)
  1424  	}
  1425  	if r := store.LookupReplica(roachpb.RKey("X")); r != r3 {
  1426  		t.Errorf("mismatched replica %s != %s", r, r3)
  1427  	}
  1428  	if r := store.LookupReplica(roachpb.RKey("Z")); r != r3 {
  1429  		t.Errorf("mismatched replica %s != %s", r, r3)
  1430  	}
  1431  	if r := store.LookupReplica(roachpb.RKey("ZZ")); r != r4 {
  1432  		t.Errorf("mismatched replica %s != %s", r, r4)
  1433  	}
  1434  	if r := store.LookupReplica(roachpb.RKey("\xff\x00")); r != r4 {
  1435  		t.Errorf("mismatched replica %s != %s", r, r4)
  1436  	}
  1437  	if store.LookupReplica(roachpb.RKeyMax) != nil {
  1438  		t.Errorf("expected roachpb.KeyMax to not have an associated replica")
  1439  	}
  1440  }
  1441  
// TestStoreSetRangesMaxBytes creates a set of ranges via splitting and
// then sets a custom max bytes value in the zone config to verify that
// the ranges' max bytes are updated appropriately.
  1445  func TestStoreSetRangesMaxBytes(t *testing.T) {
  1446  	defer leaktest.AfterTest(t)()
  1447  	stopper := stop.NewStopper()
  1448  	defer stopper.Stop(context.Background())
  1449  	cfg := TestStoreConfig(nil)
  1450  	cfg.TestingKnobs.DisableMergeQueue = true
  1451  	store := createTestStoreWithConfig(t, stopper,
  1452  		testStoreOpts{
  1453  			// This test was written before test stores could start with more than one
  1454  			// range and was not adapted.
  1455  			createSystemRanges: false,
  1456  		},
  1457  		&cfg)
  1458  
  1459  	baseID := uint32(keys.MinUserDescID)
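	// Each table prefix below is split off into its own range. The zone
	// configs installed afterwards should resize the ranges for tables
	// baseID and baseID+2 while leaving the others at the default.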
  1460  	testData := []struct {
  1461  		repl        *Replica
  1462  		expMaxBytes int64
  1463  	}{
  1464  		{store.LookupReplica(roachpb.RKeyMin),
  1465  			*store.cfg.DefaultZoneConfig.RangeMaxBytes},
  1466  		{splitTestRange(
  1467  			store, roachpb.RKeyMin, roachpb.RKey(keys.SystemSQLCodec.TablePrefix(baseID)), t),
  1468  			1 << 20},
  1469  		{splitTestRange(
  1470  			store, roachpb.RKey(keys.SystemSQLCodec.TablePrefix(baseID)), roachpb.RKey(keys.SystemSQLCodec.TablePrefix(baseID+1)), t),
  1471  			*store.cfg.DefaultZoneConfig.RangeMaxBytes},
  1472  		{splitTestRange(
  1473  			store, roachpb.RKey(keys.SystemSQLCodec.TablePrefix(baseID+1)), roachpb.RKey(keys.SystemSQLCodec.TablePrefix(baseID+2)), t),
  1474  			2 << 20},
  1475  	}
  1476  
  1477  	// Set zone configs.
  1478  	config.TestingSetZoneConfig(baseID, zonepb.ZoneConfig{RangeMaxBytes: proto.Int64(1 << 20)})
  1479  	config.TestingSetZoneConfig(baseID+2, zonepb.ZoneConfig{RangeMaxBytes: proto.Int64(2 << 20)})
  1480  
	// Despite faking the zone configs, we still need to have a system config
	// entry so that the store picks up the new zone configs. This new system
	// config needs to be non-empty so that it differs from the initial value,
	// which is what triggers the system config callback to run.
  1485  	sysCfg := &config.SystemConfigEntries{}
  1486  	sysCfg.Values = []roachpb.KeyValue{{Key: roachpb.Key("a")}}
  1487  	if err := store.Gossip().AddInfoProto(gossip.KeySystemConfig, sysCfg, 0); err != nil {
  1488  		t.Fatal(err)
  1489  	}
  1490  
  1491  	testutils.SucceedsSoon(t, func() error {
  1492  		for _, test := range testData {
  1493  			if mb := test.repl.GetMaxBytes(); mb != test.expMaxBytes {
  1494  				return errors.Errorf("range max bytes values did not change to %d; got %d", test.expMaxBytes, mb)
  1495  			}
  1496  		}
  1497  		return nil
  1498  	})
  1499  }
  1500  
// TestStoreResolveWriteIntent adds a write intent and then verifies
// that a conflicting put succeeds and aborts the intent's txn when the
// pushee has lower priority. Otherwise, it verifies that the put
// blocks until the original txn ends.
  1505  func TestStoreResolveWriteIntent(t *testing.T) {
  1506  	defer leaktest.AfterTest(t)()
  1507  
  1508  	manual := hlc.NewManualClock(123)
  1509  	cfg := TestStoreConfig(hlc.NewClock(manual.UnixNano, 1000*time.Nanosecond))
  1510  	cfg.TestingKnobs.EvalKnobs.TestingEvalFilter =
  1511  		func(filterArgs kvserverbase.FilterArgs) *roachpb.Error {
  1512  			pr, ok := filterArgs.Req.(*roachpb.PushTxnRequest)
  1513  			if !ok || pr.PusherTxn.Name != "test" {
  1514  				return nil
  1515  			}
  1516  			if exp, act := manual.UnixNano(), pr.PushTo.WallTime; exp > act {
  1517  				return roachpb.NewError(fmt.Errorf("expected PushTo > WallTime, but got %d < %d:\n%+v", act, exp, pr))
  1518  			}
  1519  			return nil
  1520  		}
  1521  	stopper := stop.NewStopper()
  1522  	defer stopper.Stop(context.Background())
  1523  	store := createTestStoreWithConfig(t, stopper, testStoreOpts{createSystemRanges: true}, &cfg)
  1524  
  1525  	for i, resolvable := range []bool{true, false} {
  1526  		key := roachpb.Key(fmt.Sprintf("key-%d", i))
  1527  		pusher := newTransaction("test", key, 1, store.cfg.Clock)
  1528  		pushee := newTransaction("test", key, 1, store.cfg.Clock)
  1529  		if resolvable {
  1530  			pushee.Priority = enginepb.MinTxnPriority
  1531  			pusher.Priority = enginepb.MaxTxnPriority // Pusher will win.
  1532  		} else {
  1533  			pushee.Priority = enginepb.MaxTxnPriority
  1534  			pusher.Priority = enginepb.MinTxnPriority // Pusher will lose.
  1535  		}
  1536  
  1537  		// First lay down intent using the pushee's txn.
  1538  		pArgs := putArgs(key, []byte("value"))
  1539  		h := roachpb.Header{Txn: pushee}
  1540  		assignSeqNumsForReqs(pushee, &pArgs)
  1541  		if _, err := kv.SendWrappedWith(context.Background(), store.TestSender(), h, &pArgs); err != nil {
  1542  			t.Fatal(err)
  1543  		}
  1544  
  1545  		manual.Increment(100)
  1546  		// Now, try a put using the pusher's txn.
  1547  		h.Txn = pusher
  1548  		resultCh := make(chan *roachpb.Error, 1)
  1549  		go func() {
  1550  			_, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), h, &pArgs)
  1551  			resultCh <- pErr
  1552  		}()
  1553  
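		// If the pusher outranks the pushee, the conflicting put should
		// abort the pushee and return promptly. Otherwise it blocks on the
		// intent until the pushee's txn is ended below.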
  1554  		if resolvable {
  1555  			if pErr := <-resultCh; pErr != nil {
  1556  				t.Fatalf("expected intent resolved; got unexpected error: %s", pErr)
  1557  			}
  1558  			txnKey := keys.TransactionKey(pushee.Key, pushee.ID)
  1559  			var txn roachpb.Transaction
  1560  			if ok, err := storage.MVCCGetProto(
  1561  				context.Background(), store.Engine(), txnKey, hlc.Timestamp{}, &txn, storage.MVCCGetOptions{},
  1562  			); err != nil {
  1563  				t.Fatal(err)
  1564  			} else if ok {
  1565  				t.Fatalf("expected transaction record; got %s", txn)
  1566  			}
  1567  		} else {
  1568  			select {
  1569  			case pErr := <-resultCh:
  1570  				t.Fatalf("did not expect put to complete with lower priority: %s", pErr)
  1571  			case <-time.After(10 * time.Millisecond):
  1572  				// Send an end transaction to allow the original push to complete.
  1573  				etArgs, h := endTxnArgs(pushee, true)
  1574  				assignSeqNumsForReqs(pushee, &etArgs)
  1575  				if _, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), h, &etArgs); pErr != nil {
  1576  					t.Fatal(pErr)
  1577  				}
  1578  				if pErr := <-resultCh; pErr != nil {
  1579  					t.Fatalf("expected successful put after pushee txn ended; got %s", pErr)
  1580  				}
  1581  			}
  1582  		}
  1583  	}
  1584  }
  1585  
  1586  // TestStoreResolveWriteIntentRollback verifies that resolving a write
  1587  // intent by aborting it yields the previous value.
  1588  func TestStoreResolveWriteIntentRollback(t *testing.T) {
  1589  	defer leaktest.AfterTest(t)()
  1590  	stopper := stop.NewStopper()
  1591  	defer stopper.Stop(context.Background())
  1592  	store, _ := createTestStore(t, testStoreOpts{createSystemRanges: true}, stopper)
  1593  
  1594  	key := roachpb.Key("a")
  1595  	pusher := newTransaction("test", key, 1, store.cfg.Clock)
  1596  	pushee := newTransaction("test", key, 1, store.cfg.Clock)
  1597  	pushee.Priority = enginepb.MinTxnPriority
  1598  	pusher.Priority = enginepb.MaxTxnPriority // Pusher will win.
  1599  
  1600  	// First lay down intent using the pushee's txn.
  1601  	args := incrementArgs(key, 1)
  1602  	h := roachpb.Header{Txn: pushee}
  1603  	assignSeqNumsForReqs(pushee, args)
  1604  	if _, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), h, args); pErr != nil {
  1605  		t.Fatal(pErr)
  1606  	}
  1607  
  1608  	// Now, try a put using the pusher's txn.
  1609  	h.Txn = pusher
  1610  	args.Increment = 2
  1611  	assignSeqNumsForReqs(pusher, args)
  1612  	if resp, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), h, args); pErr != nil {
  1613  		t.Errorf("expected increment to succeed: %s", pErr)
  1614  	} else if reply := resp.(*roachpb.IncrementResponse); reply.NewValue != 2 {
  1615  		t.Errorf("expected rollback of earlier increment to yield increment value of 2; got %d", reply.NewValue)
  1616  	}
  1617  }
  1618  
  1619  // TestStoreResolveWriteIntentPushOnRead verifies that resolving a write intent
  1620  // for a read will push the timestamp. It tests this along a few dimensions:
  1621  // - high-priority pushes       vs. low-priority pushes
  1622  // - already pushed pushee txns vs. not already pushed pushee txns
  1623  // - PENDING pushee txn records vs. STAGING pushee txn records
  1624  func TestStoreResolveWriteIntentPushOnRead(t *testing.T) {
  1625  	defer leaktest.AfterTest(t)()
  1626  	storeCfg := TestStoreConfig(nil)
  1627  	storeCfg.TestingKnobs.DontRetryPushTxnFailures = true
  1628  	storeCfg.TestingKnobs.DontRecoverIndeterminateCommits = true
  1629  	stopper := stop.NewStopper()
  1630  	ctx := context.Background()
  1631  	defer stopper.Stop(ctx)
  1632  	store := createTestStoreWithConfig(t, stopper, testStoreOpts{createSystemRanges: true}, &storeCfg)
  1633  
  1634  	testCases := []struct {
  1635  		pusherWillWin       bool   // if true, pusher will have a high enough priority to push the pushee
  1636  		pusheeAlreadyPushed bool   // if true, pushee's timestamp will be set above pusher's target timestamp
  1637  		pusheeStagingRecord bool   // if true, pushee's record is STAGING, otherwise PENDING
  1638  		expPushError        string // regexp pattern to match on run error, if not empty
  1639  		expPusheeRetry      bool   // do we expect the pushee to hit a retry error when committing?
  1640  	}{
  1641  		{
  1642  			// Insufficient priority to push.
  1643  			pusherWillWin:       false,
  1644  			pusheeAlreadyPushed: false,
  1645  			pusheeStagingRecord: false,
  1646  			expPushError:        "failed to push",
  1647  			expPusheeRetry:      false,
  1648  		},
  1649  		{
  1650  			// Successful push.
  1651  			pusherWillWin:       true,
  1652  			pusheeAlreadyPushed: false,
  1653  			pusheeStagingRecord: false,
  1654  			expPushError:        "",
  1655  			expPusheeRetry:      true,
  1656  		},
  1657  		{
  1658  			// Already pushed, no-op.
  1659  			pusherWillWin:       false,
  1660  			pusheeAlreadyPushed: true,
  1661  			pusheeStagingRecord: false,
  1662  			expPushError:        "",
  1663  			expPusheeRetry:      false,
  1664  		},
  1665  		{
  1666  			// Already pushed, no-op.
  1667  			pusherWillWin:       true,
  1668  			pusheeAlreadyPushed: true,
  1669  			pusheeStagingRecord: false,
  1670  			expPushError:        "",
  1671  			expPusheeRetry:      false,
  1672  		},
  1673  		{
  1674  			// Insufficient priority to push.
  1675  			pusherWillWin:       false,
  1676  			pusheeAlreadyPushed: false,
  1677  			pusheeStagingRecord: true,
  1678  			expPushError:        "failed to push",
  1679  			expPusheeRetry:      false,
  1680  		},
  1681  		{
  1682  			// Cannot push STAGING txn record.
  1683  			pusherWillWin:       true,
  1684  			pusheeAlreadyPushed: false,
  1685  			pusheeStagingRecord: true,
  1686  			expPushError:        "found txn in indeterminate STAGING state",
  1687  			expPusheeRetry:      false,
  1688  		},
  1689  		{
  1690  			// Already pushed the STAGING record, no-op.
  1691  			pusherWillWin:       false,
  1692  			pusheeAlreadyPushed: true,
  1693  			pusheeStagingRecord: true,
  1694  			expPushError:        "",
  1695  			expPusheeRetry:      false,
  1696  		},
  1697  		{
  1698  			// Already pushed the STAGING record, no-op.
  1699  			pusherWillWin:       true,
  1700  			pusheeAlreadyPushed: true,
  1701  			pusheeStagingRecord: true,
  1702  			expPushError:        "",
  1703  			expPusheeRetry:      false,
  1704  		},
  1705  	}
  1706  	for i, tc := range testCases {
  1707  		name := fmt.Sprintf("%d-pusherWillWin=%t,pusheePushed=%t,pusheeStaging=%t",
  1708  			i, tc.pusherWillWin, tc.pusheeAlreadyPushed, tc.pusheeStagingRecord)
  1709  		t.Run(name, func(t *testing.T) {
  1710  			key := roachpb.Key(fmt.Sprintf("key-%s", name))
  1711  
  1712  			// First, write original value. We use this value as a sentinel; we'll
  1713  			// check that we can read it later.
  1714  			{
  1715  				args := putArgs(key, []byte("value1"))
  1716  				if _, pErr := kv.SendWrapped(ctx, store.TestSender(), &args); pErr != nil {
  1717  					t.Fatal(pErr)
  1718  				}
  1719  			}
  1720  
  1721  			pusher := newTransaction("pusher", key, 1, store.cfg.Clock)
  1722  			pushee := newTransaction("pushee", key, 1, store.cfg.Clock)
  1723  
  1724  			// Set transaction priorities.
  1725  			if tc.pusherWillWin {
  1726  				pushee.Priority = enginepb.MinTxnPriority
  1727  				pusher.Priority = enginepb.MaxTxnPriority // Pusher will win.
  1728  			} else {
  1729  				pushee.Priority = enginepb.MaxTxnPriority
  1730  				pusher.Priority = enginepb.MinTxnPriority // Pusher will lose.
  1731  			}
  1732  
  1733  			// Second, lay down intent using the pushee's txn.
  1734  			{
  1735  				args := putArgs(key, []byte("value2"))
  1736  				assignSeqNumsForReqs(pushee, &args)
  1737  				h := roachpb.Header{Txn: pushee}
  1738  				if _, pErr := kv.SendWrappedWith(ctx, store.TestSender(), h, &args); pErr != nil {
  1739  					t.Fatal(pErr)
  1740  				}
  1741  			}
  1742  
  1743  			// Determine the timestamp to read at.
  1744  			readTs := store.cfg.Clock.Now()
  1745  			// Give the pusher a previous observed timestamp equal to this read
  1746  			// timestamp. This ensures that the pusher doesn't need to push the
  1747  			// intent any higher just to push it out of its uncertainty window.
  1748  			pusher.UpdateObservedTimestamp(store.Ident.NodeID, readTs)
  1749  
  1750  			// If the pushee is already pushed, update the transaction record.
  1751  			if tc.pusheeAlreadyPushed {
  1752  				pushedTs := store.cfg.Clock.Now()
  1753  				pushee.WriteTimestamp.Forward(pushedTs)
  1754  				pushee.ReadTimestamp.Forward(pushedTs)
  1755  				hb, hbH := heartbeatArgs(pushee, store.cfg.Clock.Now())
  1756  				if _, pErr := kv.SendWrappedWith(ctx, store.TestSender(), hbH, &hb); pErr != nil {
  1757  					t.Fatal(pErr)
  1758  				}
  1759  			}
  1760  
  1761  			// If the pushee is staging, update the transaction record.
  1762  			if tc.pusheeStagingRecord {
  1763  				et, etH := endTxnArgs(pushee, true)
  1764  				et.InFlightWrites = []roachpb.SequencedWrite{{Key: []byte("keyA"), Sequence: 1}}
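				// An EndTxn with in-flight writes attempts a parallel commit,
				// leaving the record in STAGING rather than COMMITTED.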
  1765  				etReply, pErr := kv.SendWrappedWith(ctx, store.TestSender(), etH, &et)
  1766  				if pErr != nil {
  1767  					t.Fatal(pErr)
  1768  				}
  1769  				if replyTxn := etReply.Header().Txn; replyTxn.Status != roachpb.STAGING {
  1770  					t.Fatalf("expected STAGING txn, found %v", replyTxn)
  1771  				}
  1772  			}
  1773  
  1774  			// Now, try to read value using the pusher's txn.
  1775  			pusher.ReadTimestamp.Forward(readTs)
  1776  			pusher.WriteTimestamp.Forward(readTs)
  1777  			gArgs := getArgs(key)
  1778  			assignSeqNumsForReqs(pusher, &gArgs)
  1779  			repl, pErr := kv.SendWrappedWith(ctx, store.TestSender(), roachpb.Header{Txn: pusher}, &gArgs)
  1780  			if tc.expPushError == "" {
  1781  				if pErr != nil {
  1782  					t.Errorf("expected read to succeed: %s", pErr)
  1783  				} else if replyBytes, err := repl.(*roachpb.GetResponse).Value.GetBytes(); err != nil {
  1784  					t.Fatal(err)
  1785  				} else if !bytes.Equal(replyBytes, []byte("value1")) {
  1786  					t.Errorf("expected bytes to be %q, got %q", "value1", replyBytes)
  1787  				}
  1788  			} else {
  1789  				if !testutils.IsPError(pErr, tc.expPushError) {
  1790  					t.Fatalf("expected error %q, found %v", tc.expPushError, pErr)
  1791  				}
  1792  			}
  1793  
  1794  			// Finally, try to end the pushee's transaction. Check whether
  1795  			// the commit succeeds or fails.
  1796  			etArgs, etH := endTxnArgs(pushee, true)
  1797  			assignSeqNumsForReqs(pushee, &etArgs)
  1798  			_, pErr = kv.SendWrappedWith(ctx, store.TestSender(), etH, &etArgs)
  1799  			if tc.expPusheeRetry {
  1800  				if _, ok := pErr.GetDetail().(*roachpb.TransactionRetryError); !ok {
  1801  					t.Errorf("expected transaction retry error; got %s", pErr)
  1802  				}
  1803  			} else {
  1804  				if pErr != nil {
  1805  					t.Fatalf("expected no commit error; got %s", pErr)
  1806  				}
  1807  			}
  1808  		})
  1809  	}
  1810  }
  1811  
  1812  // TestStoreResolveWriteIntentNoTxn verifies that reads and writes
  1813  // which are not part of a transaction can push intents.
  1814  func TestStoreResolveWriteIntentNoTxn(t *testing.T) {
  1815  	defer leaktest.AfterTest(t)()
  1816  	stopper := stop.NewStopper()
  1817  	defer stopper.Stop(context.Background())
  1818  	store, _ := createTestStore(t, testStoreOpts{createSystemRanges: true}, stopper)
  1819  
  1820  	key := roachpb.Key("a")
  1821  	pushee := newTransaction("test", key, 1, store.cfg.Clock)
  1822  
  1823  	// First, write the pushee's txn via HeartbeatTxn request.
  1824  	hb, hbH := heartbeatArgs(pushee, pushee.WriteTimestamp)
  1825  	if _, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), hbH, &hb); pErr != nil {
  1826  		t.Fatal(pErr)
  1827  	}
  1828  
  1829  	// Next, lay down intent from pushee.
  1830  	args := putArgs(key, []byte("value1"))
  1831  	assignSeqNumsForReqs(pushee, &args)
  1832  	if _, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), hbH, &args); pErr != nil {
  1833  		t.Fatal(pErr)
  1834  	}
  1835  
  1836  	// Now, try to read outside a transaction.
  1837  	getTS := store.cfg.Clock.Now() // accessed later
  1838  	{
  1839  		gArgs := getArgs(key)
  1840  		if reply, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{
  1841  			Timestamp:    getTS,
  1842  			UserPriority: roachpb.MaxUserPriority,
  1843  		}, &gArgs); pErr != nil {
  1844  			t.Errorf("expected read to succeed: %s", pErr)
  1845  		} else if gReply := reply.(*roachpb.GetResponse); gReply.Value != nil {
  1846  			t.Errorf("expected value to be nil, got %+v", gReply.Value)
  1847  		}
  1848  	}
  1849  
  1850  	{
  1851  		// Next, try to write outside of a transaction. We will succeed in pushing txn.
  1852  		putTS := store.cfg.Clock.Now()
  1853  		args.Value.SetBytes([]byte("value2"))
  1854  		if _, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{
  1855  			Timestamp:    putTS,
  1856  			UserPriority: roachpb.MaxUserPriority,
  1857  		}, &args); pErr != nil {
  1858  			t.Errorf("expected success aborting pushee's txn; got %s", pErr)
  1859  		}
  1860  	}
  1861  
  1862  	// Read pushee's txn.
  1863  	txnKey := keys.TransactionKey(pushee.Key, pushee.ID)
  1864  	var txn roachpb.Transaction
  1865  	if ok, err := storage.MVCCGetProto(
  1866  		context.Background(), store.Engine(), txnKey, hlc.Timestamp{}, &txn, storage.MVCCGetOptions{},
  1867  	); !ok || err != nil {
  1868  		t.Fatalf("not found or err: %+v", err)
  1869  	}
  1870  	if txn.Status != roachpb.ABORTED {
  1871  		t.Errorf("expected pushee to be aborted; got %s", txn.Status)
  1872  	}
  1873  
	// Verify that the pushee's timestamp was moved forward by the earlier
	// read, which had the pushee's txn available via the write intent error.
  1876  	minExpTS := getTS
  1877  	minExpTS.Logical++
  1878  	if txn.WriteTimestamp.Less(minExpTS) {
  1879  		t.Errorf("expected pushee timestamp pushed to %s; got %s", minExpTS, txn.WriteTimestamp)
  1880  	}
	// Similarly, verify that the pushee's priority was bumped from 0
	// to MaxTxnPriority-1 during the push.
  1883  	if txn.Priority != enginepb.MaxTxnPriority-1 {
  1884  		t.Errorf("expected pushee priority to be pushed to %d; got %d", enginepb.MaxTxnPriority-1, txn.Priority)
  1885  	}
  1886  
  1887  	// Finally, try to end the pushee's transaction; it should have
  1888  	// been aborted.
  1889  	etArgs, h := endTxnArgs(pushee, true)
  1890  	assignSeqNumsForReqs(pushee, &etArgs)
  1891  	_, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), h, &etArgs)
  1892  	if pErr == nil {
  1893  		t.Errorf("unexpected success committing transaction")
  1894  	}
  1895  	if _, ok := pErr.GetDetail().(*roachpb.TransactionAbortedError); !ok {
  1896  		t.Errorf("expected transaction aborted error; got %s", pErr)
  1897  	}
  1898  }
  1899  
  1900  func setTxnAutoGC(to bool) func() { return batcheval.TestingSetTxnAutoGC(to) }
  1901  
  1902  // TestStoreReadInconsistent verifies that gets and scans with read
  1903  // consistency set to INCONSISTENT or READ_UNCOMMITTED either push or
  1904  // simply ignore extant intents (if they cannot be pushed), depending
  1905  // on the intent priority. READ_UNCOMMITTED requests will also return
  1906  // the intents that they run into.
  1907  func TestStoreReadInconsistent(t *testing.T) {
  1908  	defer leaktest.AfterTest(t)()
  1909  
  1910  	for _, rc := range []roachpb.ReadConsistencyType{
  1911  		roachpb.READ_UNCOMMITTED,
  1912  		roachpb.INCONSISTENT,
  1913  	} {
  1914  		t.Run(rc.String(), func(t *testing.T) {
  1915  			// The test relies on being able to commit a Txn without specifying the
  1916  			// intent, while preserving the Txn record. Turn off
  1917  			// automatic cleanup for this to work.
  1918  			defer setTxnAutoGC(false)()
  1919  			stopper := stop.NewStopper()
  1920  			defer stopper.Stop(context.Background())
  1921  			store, _ := createTestStore(t, testStoreOpts{createSystemRanges: true}, stopper)
  1922  
  1923  			for _, canPush := range []bool{true, false} {
  1924  				keyA := roachpb.Key(fmt.Sprintf("%t-a", canPush))
  1925  				keyB := roachpb.Key(fmt.Sprintf("%t-b", canPush))
  1926  
  1927  				// First, write keyA.
  1928  				args := putArgs(keyA, []byte("value1"))
  1929  				if _, pErr := kv.SendWrapped(context.Background(), store.TestSender(), &args); pErr != nil {
  1930  					t.Fatal(pErr)
  1931  				}
  1932  
				// Next, write intents for keyA and keyB. Note that the
				// transactions have very pushable priorities if canPush is true
				// and unpushable ones otherwise: a negative user priority
				// selects an exact txn priority of its absolute value.
  1936  				priority := roachpb.UserPriority(-math.MaxInt32)
  1937  				if canPush {
  1938  					priority = -1
  1939  				}
  1940  				args.Value.SetBytes([]byte("value2"))
  1941  				txnA := newTransaction("testA", keyA, priority, store.cfg.Clock)
  1942  				txnB := newTransaction("testB", keyB, priority, store.cfg.Clock)
  1943  				for _, txn := range []*roachpb.Transaction{txnA, txnB} {
  1944  					args.Key = txn.Key
  1945  					assignSeqNumsForReqs(txn, &args)
  1946  					if _, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{Txn: txn}, &args); pErr != nil {
  1947  						t.Fatal(pErr)
  1948  					}
  1949  				}
  1950  				// End txn B, but without resolving the intent.
  1951  				etArgs, h := endTxnArgs(txnB, true)
  1952  				assignSeqNumsForReqs(txnB, &etArgs)
  1953  				if _, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), h, &etArgs); pErr != nil {
  1954  					t.Fatal(pErr)
  1955  				}
  1956  
  1957  				// Now, get from both keys and verify. Whether we can push or not, we
  1958  				// will be able to read with both INCONSISTENT and READ_UNCOMMITTED.
  1959  				// With READ_UNCOMMITTED, we'll also be able to see the intent's value.
  1960  				gArgs := getArgs(keyA)
  1961  				if reply, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{
  1962  					ReadConsistency: rc,
  1963  				}, &gArgs); pErr != nil {
  1964  					t.Errorf("expected read to succeed: %s", pErr)
  1965  				} else {
  1966  					gReply := reply.(*roachpb.GetResponse)
  1967  					if replyBytes, err := gReply.Value.GetBytes(); err != nil {
  1968  						t.Fatal(err)
  1969  					} else if !bytes.Equal(replyBytes, []byte("value1")) {
  1970  						t.Errorf("expected value %q, got %+v", []byte("value1"), reply)
  1971  					} else if rc == roachpb.READ_UNCOMMITTED {
  1972  						// READ_UNCOMMITTED will also return the intent.
  1973  						if replyIntentBytes, err := gReply.IntentValue.GetBytes(); err != nil {
  1974  							t.Fatal(err)
  1975  						} else if !bytes.Equal(replyIntentBytes, []byte("value2")) {
  1976  							t.Errorf("expected value %q, got %+v", []byte("value2"), reply)
  1977  						}
  1978  					} else if rc == roachpb.INCONSISTENT {
  1979  						if gReply.IntentValue != nil {
  1980  							t.Errorf("expected value nil, got %+v", gReply.IntentValue)
  1981  						}
  1982  					}
  1983  				}
  1984  
  1985  				gArgs.Key = keyB
  1986  				if reply, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{
  1987  					ReadConsistency: rc,
  1988  				}, &gArgs); pErr != nil {
  1989  					t.Errorf("expected read to succeed: %s", pErr)
  1990  				} else {
  1991  					gReply := reply.(*roachpb.GetResponse)
  1992  					if gReply.Value != nil {
  1993  						// The new value of B will not be read at first.
  1994  						t.Errorf("expected value nil, got %+v", gReply.Value)
  1995  					} else if rc == roachpb.READ_UNCOMMITTED {
  1996  						// READ_UNCOMMITTED will also return the intent.
  1997  						if replyIntentBytes, err := gReply.IntentValue.GetBytes(); err != nil {
  1998  							t.Fatal(err)
  1999  						} else if !bytes.Equal(replyIntentBytes, []byte("value2")) {
  2000  							t.Errorf("expected value %q, got %+v", []byte("value2"), reply)
  2001  						}
  2002  					} else if rc == roachpb.INCONSISTENT {
  2003  						if gReply.IntentValue != nil {
  2004  							t.Errorf("expected value nil, got %+v", gReply.IntentValue)
  2005  						}
  2006  					}
  2007  				}
				// However, it will be read eventually: txn B has committed, so
				// its intent can be resolved asynchronously.
  2010  				testutils.SucceedsSoon(t, func() error {
  2011  					if reply, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{
  2012  						ReadConsistency: rc,
  2013  					}, &gArgs); pErr != nil {
  2014  						return errors.Errorf("expected read to succeed: %s", pErr)
  2015  					} else if gReply := reply.(*roachpb.GetResponse).Value; gReply == nil {
  2016  						return errors.Errorf("value is nil")
  2017  					} else if replyBytes, err := gReply.GetBytes(); err != nil {
  2018  						return err
  2019  					} else if !bytes.Equal(replyBytes, []byte("value2")) {
  2020  						return errors.Errorf("expected value %q, got %+v", []byte("value2"), reply)
  2021  					}
  2022  					return nil
  2023  				})
  2024  
  2025  				// Scan keys and verify results.
  2026  				sArgs := scanArgs(keyA, keyB.Next())
  2027  				reply, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{
  2028  					ReadConsistency: rc,
  2029  				}, sArgs)
  2030  				if pErr != nil {
  2031  					t.Errorf("expected scan to succeed: %s", pErr)
  2032  				}
  2033  				sReply := reply.(*roachpb.ScanResponse)
  2034  				if l := len(sReply.Rows); l != 2 {
  2035  					t.Errorf("expected 2 results; got %d", l)
  2036  				} else if key := sReply.Rows[0].Key; !key.Equal(keyA) {
  2037  					t.Errorf("expected key %q; got %q", keyA, key)
  2038  				} else if key := sReply.Rows[1].Key; !key.Equal(keyB) {
  2039  					t.Errorf("expected key %q; got %q", keyB, key)
  2040  				} else if val1, err := sReply.Rows[0].Value.GetBytes(); err != nil {
  2041  					t.Fatal(err)
  2042  				} else if !bytes.Equal(val1, []byte("value1")) {
  2043  					t.Errorf("expected value %q, got %q", []byte("value1"), val1)
  2044  				} else if val2, err := sReply.Rows[1].Value.GetBytes(); err != nil {
  2045  					t.Fatal(err)
  2046  				} else if !bytes.Equal(val2, []byte("value2")) {
  2047  					t.Errorf("expected value %q, got %q", []byte("value2"), val2)
  2048  				} else if rc == roachpb.READ_UNCOMMITTED {
  2049  					if l := len(sReply.IntentRows); l != 1 {
  2050  						t.Errorf("expected 1 intent result; got %d", l)
  2051  					} else if intentKey := sReply.IntentRows[0].Key; !intentKey.Equal(keyA) {
  2052  						t.Errorf("expected intent key %q; got %q", keyA, intentKey)
  2053  					} else if intentVal1, err := sReply.IntentRows[0].Value.GetBytes(); err != nil {
  2054  						t.Fatal(err)
  2055  					} else if !bytes.Equal(intentVal1, []byte("value2")) {
  2056  						t.Errorf("expected intent value %q, got %q", []byte("value2"), intentVal1)
  2057  					}
  2058  				} else if rc == roachpb.INCONSISTENT {
  2059  					if l := len(sReply.IntentRows); l != 0 {
  2060  						t.Errorf("expected 0 intent result; got %d", l)
  2061  					}
  2062  				}
  2063  
  2064  				// Reverse scan keys and verify results.
  2065  				rsArgs := revScanArgs(keyA, keyB.Next())
  2066  				reply, pErr = kv.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{
  2067  					ReadConsistency: rc,
  2068  				}, rsArgs)
  2069  				if pErr != nil {
  2070  					t.Errorf("expected scan to succeed: %s", pErr)
  2071  				}
  2072  				rsReply := reply.(*roachpb.ReverseScanResponse)
  2073  				if l := len(rsReply.Rows); l != 2 {
  2074  					t.Errorf("expected 2 results; got %d", l)
				} else if key := rsReply.Rows[0].Key; !key.Equal(keyB) {
					t.Errorf("expected key %q; got %q", keyB, key)
				} else if key := rsReply.Rows[1].Key; !key.Equal(keyA) {
					t.Errorf("expected key %q; got %q", keyA, key)
  2079  				} else if val1, err := rsReply.Rows[0].Value.GetBytes(); err != nil {
  2080  					t.Fatal(err)
  2081  				} else if !bytes.Equal(val1, []byte("value2")) {
  2082  					t.Errorf("expected value %q, got %q", []byte("value2"), val1)
  2083  				} else if val2, err := rsReply.Rows[1].Value.GetBytes(); err != nil {
  2084  					t.Fatal(err)
  2085  				} else if !bytes.Equal(val2, []byte("value1")) {
  2086  					t.Errorf("expected value %q, got %q", []byte("value1"), val2)
  2087  				} else if rc == roachpb.READ_UNCOMMITTED {
  2088  					if l := len(rsReply.IntentRows); l != 1 {
  2089  						t.Errorf("expected 1 intent result; got %d", l)
  2090  					} else if intentKey := rsReply.IntentRows[0].Key; !intentKey.Equal(keyA) {
  2091  						t.Errorf("expected intent key %q; got %q", keyA, intentKey)
  2092  					} else if intentVal1, err := rsReply.IntentRows[0].Value.GetBytes(); err != nil {
  2093  						t.Fatal(err)
  2094  					} else if !bytes.Equal(intentVal1, []byte("value2")) {
  2095  						t.Errorf("expected intent value %q, got %q", []byte("value2"), intentVal1)
  2096  					}
  2097  				} else if rc == roachpb.INCONSISTENT {
  2098  					if l := len(rsReply.IntentRows); l != 0 {
  2099  						t.Errorf("expected 0 intent result; got %d", l)
  2100  					}
  2101  				}
  2102  			}
  2103  		})
  2104  	}
  2105  }
  2106  
  2107  // TestStoreScanResumeTSCache verifies that the timestamp cache is
  2108  // properly updated when scans and reverse scans return partial
  2109  // results and a resume span.
  2110  func TestStoreScanResumeTSCache(t *testing.T) {
  2111  	defer leaktest.AfterTest(t)()
  2112  
  2113  	stopper := stop.NewStopper()
  2114  	defer stopper.Stop(context.Background())
  2115  	store, manualClock := createTestStore(t, testStoreOpts{createSystemRanges: true}, stopper)
  2116  
  2117  	// Write three keys at time t0.
  2118  	t0 := 1 * time.Second
  2119  	manualClock.Set(t0.Nanoseconds())
  2120  	h := roachpb.Header{Timestamp: makeTS(t0.Nanoseconds(), 0)}
  2121  	for _, keyStr := range []string{"a", "b", "c"} {
  2122  		key := roachpb.Key(keyStr)
  2123  		putArgs := putArgs(key, []byte("value"))
  2124  		if _, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), h, &putArgs); pErr != nil {
  2125  			t.Fatal(pErr)
  2126  		}
  2127  	}
  2128  
  2129  	// Scan the span at t1 with max keys and verify the expected resume span.
  2130  	span := roachpb.Span{Key: roachpb.Key("a"), EndKey: roachpb.Key("d")}
  2131  	sArgs := scanArgs(span.Key, span.EndKey)
  2132  	t1 := 2 * time.Second
  2133  	manualClock.Set(t1.Nanoseconds())
  2134  	h.Timestamp = makeTS(t1.Nanoseconds(), 0)
  2135  	h.MaxSpanRequestKeys = 2
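	// MaxSpanRequestKeys truncates the scan after two rows, so the
	// response carries a resume span for the untouched remainder.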
  2136  	reply, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), h, sArgs)
  2137  	if pErr != nil {
  2138  		t.Fatal(pErr)
  2139  	}
  2140  	sReply := reply.(*roachpb.ScanResponse)
  2141  	if a, e := len(sReply.Rows), 2; a != e {
  2142  		t.Errorf("expected %d rows; got %d", e, a)
  2143  	}
  2144  	expResumeSpan := &roachpb.Span{Key: roachpb.Key("c"), EndKey: roachpb.Key("d")}
  2145  	if a, e := sReply.ResumeSpan, expResumeSpan; !reflect.DeepEqual(a, e) {
  2146  		t.Errorf("expected resume span %s; got %s", e, a)
  2147  	}
  2148  
  2149  	// Verify the timestamp cache has been set for "b".Next(), but not for "c".
  2150  	rTS, _ := store.tsCache.GetMax(roachpb.Key("b").Next(), nil)
  2151  	if a, e := rTS, makeTS(t1.Nanoseconds(), 0); a != e {
  2152  		t.Errorf("expected timestamp cache for \"b\".Next() set to %s; got %s", e, a)
  2153  	}
  2154  	rTS, _ = store.tsCache.GetMax(roachpb.Key("c"), nil)
  2155  	if a, lt := rTS, makeTS(t1.Nanoseconds(), 0); lt.LessEq(a) {
  2156  		t.Errorf("expected timestamp cache for \"c\" set less than %s; got %s", lt, a)
  2157  	}
  2158  
  2159  	// Reverse scan the span at t1 with max keys and verify the expected resume span.
  2160  	t2 := 3 * time.Second
  2161  	manualClock.Set(t2.Nanoseconds())
  2162  	h.Timestamp = makeTS(t2.Nanoseconds(), 0)
  2163  	rsArgs := revScanArgs(span.Key, span.EndKey)
  2164  	reply, pErr = kv.SendWrappedWith(context.Background(), store.TestSender(), h, rsArgs)
  2165  	if pErr != nil {
  2166  		t.Fatal(pErr)
  2167  	}
  2168  	rsReply := reply.(*roachpb.ReverseScanResponse)
  2169  	if a, e := len(rsReply.Rows), 2; a != e {
  2170  		t.Errorf("expected %d rows; got %d", e, a)
  2171  	}
  2172  	expResumeSpan = &roachpb.Span{Key: roachpb.Key("a"), EndKey: roachpb.Key("a").Next()}
  2173  	if a, e := rsReply.ResumeSpan, expResumeSpan; !reflect.DeepEqual(a, e) {
  2174  		t.Errorf("expected resume span %s; got %s", e, a)
  2175  	}
  2176  
  2177  	// Verify the timestamp cache has been set for "a".Next(), but not for "a".
  2178  	rTS, _ = store.tsCache.GetMax(roachpb.Key("a").Next(), nil)
  2179  	if a, e := rTS, makeTS(t2.Nanoseconds(), 0); a != e {
  2180  		t.Errorf("expected timestamp cache for \"a\".Next() set to %s; got %s", e, a)
  2181  	}
  2182  	rTS, _ = store.tsCache.GetMax(roachpb.Key("a"), nil)
  2183  	if a, lt := rTS, makeTS(t2.Nanoseconds(), 0); lt.LessEq(a) {
  2184  		t.Errorf("expected timestamp cache for \"a\" set less than %s; got %s", lt, a)
  2185  	}
  2186  }
  2187  
  2188  // TestStoreScanIntents verifies that a scan across 10 intents resolves
  2189  // them in one fell swoop using both consistent and inconsistent reads.
  2190  func TestStoreScanIntents(t *testing.T) {
  2191  	defer leaktest.AfterTest(t)()
  2192  
  2193  	cfg := TestStoreConfig(nil)
  2194  	var count int32
  2195  	countPtr := &count
  2196  
  2197  	cfg.TestingKnobs.EvalKnobs.TestingEvalFilter =
  2198  		func(filterArgs kvserverbase.FilterArgs) *roachpb.Error {
  2199  			if req, ok := filterArgs.Req.(*roachpb.ScanRequest); ok {
  2200  				// Avoid counting scan requests not generated by this test, e.g. those
  2201  				// generated by periodic gossips.
  2202  				if bytes.HasPrefix(req.Key, []byte(t.Name())) {
  2203  					atomic.AddInt32(countPtr, 1)
  2204  				}
  2205  			}
  2206  			return nil
  2207  		}
  2208  	stopper := stop.NewStopper()
  2209  	defer stopper.Stop(context.Background())
  2210  	store := createTestStoreWithConfig(t, stopper, testStoreOpts{createSystemRanges: true}, &cfg)
  2211  
  2212  	testCases := []struct {
  2213  		consistent bool
  2214  		canPush    bool  // can the txn be pushed?
  2215  		expFinish  bool  // do we expect the scan to finish?
  2216  		expCount   int32 // how many times do we expect to scan?
  2217  	}{
  2218  		// Consistent which can push will make two loops.
  2219  		{true, true, true, 2},
		// Consistent but can't push will back off, retry, and not finish.
  2221  		{true, false, false, -1},
  2222  		// Inconsistent and can push will make one loop, with async resolves.
  2223  		{false, true, true, 1},
		// Inconsistent and can't push will just read inconsistently (reading nils).
  2225  		{false, false, true, 1},
  2226  	}
  2227  	for i, test := range testCases {
  2228  		// The command filter just counts the number of scan requests which are
  2229  		// submitted to the range.
  2230  		atomic.StoreInt32(countPtr, 0)
  2231  
  2232  		// Lay down 10 intents to scan over.
  2233  		var txn *roachpb.Transaction
  2234  		keys := []roachpb.Key{}
  2235  		for j := 0; j < 10; j++ {
  2236  			key := roachpb.Key(fmt.Sprintf("%s%d-%02d", t.Name(), i, j))
  2237  			keys = append(keys, key)
  2238  			if txn == nil {
  2239  				priority := roachpb.UserPriority(1)
  2240  				if test.canPush {
  2241  					priority = roachpb.MinUserPriority
  2242  				}
  2243  				txn = newTransaction(fmt.Sprintf("test-%d", i), key, priority, store.cfg.Clock)
  2244  			}
  2245  			args := putArgs(key, []byte(fmt.Sprintf("value%02d", j)))
  2246  			assignSeqNumsForReqs(txn, &args)
  2247  			if _, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{Txn: txn}, &args); pErr != nil {
  2248  				t.Fatal(pErr)
  2249  			}
  2250  		}
  2251  
  2252  		// Scan the range and verify count. Do this in a goroutine in case
  2253  		// it isn't expected to finish.
  2254  		sArgs := scanArgs(keys[0], keys[9].Next())
  2255  		var sReply *roachpb.ScanResponse
  2256  		ts := store.Clock().Now()
  2257  		consistency := roachpb.CONSISTENT
  2258  		if !test.consistent {
  2259  			consistency = roachpb.INCONSISTENT
  2260  		}
  2261  		errChan := make(chan *roachpb.Error, 1)
  2262  		go func() {
  2263  			reply, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{
  2264  				Timestamp:       ts,
  2265  				ReadConsistency: consistency,
  2266  			}, sArgs)
  2267  			if pErr == nil {
  2268  				sReply = reply.(*roachpb.ScanResponse)
  2269  			}
  2270  			errChan <- pErr
  2271  		}()
  2272  
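		// Scans expected to finish get a generous timeout; scans expected
		// to block get a short window before we unblock them by committing
		// the txn below.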
  2273  		wait := 1 * time.Second
  2274  		if !test.expFinish {
  2275  			wait = 10 * time.Millisecond
  2276  		}
  2277  		select {
  2278  		case pErr := <-errChan:
  2279  			if pErr != nil {
  2280  				t.Fatal(pErr)
  2281  			}
  2282  			if len(sReply.Rows) != 0 {
  2283  				t.Errorf("expected empty scan result; got %+v", sReply.Rows)
  2284  			}
  2285  			if countVal := atomic.LoadInt32(countPtr); countVal != test.expCount {
  2286  				t.Errorf("%d: expected scan count %d; got %d", i, test.expCount, countVal)
  2287  			}
  2288  		case <-time.After(wait):
  2289  			if test.expFinish {
  2290  				t.Errorf("%d: scan failed to finish after %s", i, wait)
  2291  			} else {
  2292  				// Commit the unpushable txn so the read can finish.
  2293  				etArgs, h := endTxnArgs(txn, true)
  2294  				for _, key := range keys {
  2295  					etArgs.LockSpans = append(etArgs.LockSpans, roachpb.Span{Key: key})
  2296  				}
  2297  				assignSeqNumsForReqs(txn, &etArgs)
  2298  				if _, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), h, &etArgs); pErr != nil {
  2299  					t.Fatal(pErr)
  2300  				}
  2301  				<-errChan
  2302  			}
  2303  		}
  2304  	}
  2305  }
  2306  
  2307  // TestStoreScanInconsistentResolvesIntents lays down 10 intents,
  2308  // commits the txn without resolving intents, then does repeated
  2309  // inconsistent reads until the data shows up, showing that the
  2310  // inconsistent reads are triggering intent resolution.
  2311  func TestStoreScanInconsistentResolvesIntents(t *testing.T) {
  2312  	defer leaktest.AfterTest(t)()
  2313  	// This test relies on having a committed Txn record and open intents on
  2314  	// the same Range. This only works with auto-gc turned off; alternatively
  2315  	// the test could move to splitting its underlying Range.
  2316  	defer setTxnAutoGC(false)()
  2317  	var intercept atomic.Value
  2318  	intercept.Store(true)
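	// While intercept is true, the eval filter below rejects
	// ResolveIntent requests, keeping the intents in place.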
  2319  	cfg := TestStoreConfig(nil)
  2320  	cfg.TestingKnobs.EvalKnobs.TestingEvalFilter =
  2321  		func(filterArgs kvserverbase.FilterArgs) *roachpb.Error {
  2322  			_, ok := filterArgs.Req.(*roachpb.ResolveIntentRequest)
  2323  			if ok && intercept.Load().(bool) {
  2324  				return roachpb.NewErrorWithTxn(errors.Errorf("boom"), filterArgs.Hdr.Txn)
  2325  			}
  2326  			return nil
  2327  		}
  2328  	stopper := stop.NewStopper()
  2329  	defer stopper.Stop(context.Background())
  2330  	store := createTestStoreWithConfig(t, stopper, testStoreOpts{createSystemRanges: true}, &cfg)
  2331  
  2332  	// Lay down 10 intents to scan over.
  2333  	txn := newTransaction("test", roachpb.Key("foo"), 1, store.cfg.Clock)
  2334  	keys := []roachpb.Key{}
  2335  	for j := 0; j < 10; j++ {
  2336  		key := roachpb.Key(fmt.Sprintf("key%02d", j))
  2337  		keys = append(keys, key)
  2338  		args := putArgs(key, []byte(fmt.Sprintf("value%02d", j)))
  2339  		assignSeqNumsForReqs(txn, &args)
  2340  		if _, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{Txn: txn}, &args); pErr != nil {
  2341  			t.Fatal(pErr)
  2342  		}
  2343  	}
  2344  
  2345  	// Now, commit txn without resolving intents. If we hadn't disabled auto-gc
  2346  	// of Txn entries in this test, the Txn entry would be removed and later
  2347  	// attempts to resolve the intents would fail.
  2348  	etArgs, h := endTxnArgs(txn, true)
  2349  	assignSeqNumsForReqs(txn, &etArgs)
  2350  	if _, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), h, &etArgs); pErr != nil {
  2351  		t.Fatal(pErr)
  2352  	}
  2353  
  2354  	intercept.Store(false) // allow async intent resolution
  2355  
  2356  	// Scan the range repeatedly until we've verified count.
  2357  	sArgs := scanArgs(keys[0], keys[9].Next())
  2358  	testutils.SucceedsSoon(t, func() error {
  2359  		if reply, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{
  2360  			ReadConsistency: roachpb.INCONSISTENT,
  2361  		}, sArgs); pErr != nil {
  2362  			return pErr.GoError()
  2363  		} else if sReply := reply.(*roachpb.ScanResponse); len(sReply.Rows) != 10 {
  2364  			return errors.Errorf("could not read rows as expected")
  2365  		}
  2366  		return nil
  2367  	})
  2368  }
  2369  
  2370  // TestStoreScanIntentsFromTwoTxns lays down two intents from two
// different transactions. The clock is then moved forward, causing
// the transactions to expire. The intents are then scanned
  2373  // consistently, which triggers a push of both transactions and then
  2374  // resolution of the intents, allowing the scan to complete.
  2375  func TestStoreScanIntentsFromTwoTxns(t *testing.T) {
  2376  	defer leaktest.AfterTest(t)()
  2377  	stopper := stop.NewStopper()
  2378  	defer stopper.Stop(context.Background())
  2379  	store, manualClock := createTestStore(t, testStoreOpts{createSystemRanges: true}, stopper)
  2380  
  2381  	// Lay down two intents from two txns to scan over.
  2382  	key1 := roachpb.Key("bar")
  2383  	txn1 := newTransaction("test1", key1, 1, store.cfg.Clock)
  2384  	args := putArgs(key1, []byte("value1"))
  2385  	assignSeqNumsForReqs(txn1, &args)
  2386  	if _, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{Txn: txn1}, &args); pErr != nil {
  2387  		t.Fatal(pErr)
  2388  	}
  2389  
  2390  	key2 := roachpb.Key("foo")
  2391  	txn2 := newTransaction("test2", key2, 1, store.cfg.Clock)
  2392  	args = putArgs(key2, []byte("value2"))
  2393  	assignSeqNumsForReqs(txn2, &args)
  2394  	if _, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{Txn: txn2}, &args); pErr != nil {
  2395  		t.Fatal(pErr)
  2396  	}
  2397  
  2398  	// Now, expire the transactions by moving the clock forward. This will
  2399  	// result in the subsequent scan operation pushing both transactions
  2400  	// in a single batch.
  2401  	manualClock.Increment(txnwait.TxnLivenessThreshold.Nanoseconds() + 1)
  2402  
  2403  	// Scan the range and verify empty result (expired txn is aborted,
  2404  	// cleaning up intents).
  2405  	sArgs := scanArgs(key1, key2.Next())
  2406  	if reply, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{}, sArgs); pErr != nil {
  2407  		t.Fatal(pErr)
  2408  	} else if sReply := reply.(*roachpb.ScanResponse); len(sReply.Rows) != 0 {
  2409  		t.Errorf("expected empty result; got %+v", sReply.Rows)
  2410  	}
  2411  }
  2412  
// TestStoreScanMultipleIntents lays down ten intents from a single
// transaction. The clock is then moved forward such that the txn is
// expired, and the intents are scanned. Verify that all ten intents
// are resolved as a result of the single scan.
  2417  func TestStoreScanMultipleIntents(t *testing.T) {
  2418  	defer leaktest.AfterTest(t)()
  2419  
  2420  	var resolveCount int32
  2421  	manual := hlc.NewManualClock(123)
  2422  	cfg := TestStoreConfig(hlc.NewClock(manual.UnixNano, time.Nanosecond))
  2423  	cfg.TestingKnobs.EvalKnobs.TestingEvalFilter =
  2424  		func(filterArgs kvserverbase.FilterArgs) *roachpb.Error {
  2425  			if _, ok := filterArgs.Req.(*roachpb.ResolveIntentRequest); ok {
  2426  				atomic.AddInt32(&resolveCount, 1)
  2427  			}
  2428  			return nil
  2429  		}
  2430  	stopper := stop.NewStopper()
  2431  	defer stopper.Stop(context.Background())
  2432  	store := createTestStoreWithConfig(t, stopper, testStoreOpts{createSystemRanges: true}, &cfg)
  2433  
  2434  	// Lay down ten intents from a single txn.
  2435  	key1 := roachpb.Key("key00")
  2436  	key10 := roachpb.Key("key09")
  2437  	txn := newTransaction("test", key1, 1, store.cfg.Clock)
  2438  	ba := roachpb.BatchRequest{}
  2439  	for i := 0; i < 10; i++ {
  2440  		pArgs := putArgs(roachpb.Key(fmt.Sprintf("key%02d", i)), []byte("value"))
  2441  		ba.Add(&pArgs)
  2442  		assignSeqNumsForReqs(txn, &pArgs)
  2443  	}
  2444  	ba.Header = roachpb.Header{Txn: txn}
  2445  	if _, pErr := store.TestSender().Send(context.Background(), ba); pErr != nil {
  2446  		t.Fatal(pErr)
  2447  	}
  2448  
	// Now, expire the transaction by moving the clock forward. This will
	// result in the subsequent scan operation pushing the expired
	// transaction and resolving its intents.
  2452  	manual.Increment(txnwait.TxnLivenessThreshold.Nanoseconds() + 1)
  2453  
  2454  	// Query the range with a single scan, which should cause all intents
  2455  	// to be resolved.
  2456  	sArgs := scanArgs(key1, key10.Next())
  2457  	if _, pErr := kv.SendWrapped(context.Background(), store.TestSender(), sArgs); pErr != nil {
  2458  		t.Fatal(pErr)
  2459  	}
  2460  
	// Verify all ten intents are resolved as a result of the single scan.
  2462  	testutils.SucceedsSoon(t, func() error {
  2463  		if a, e := atomic.LoadInt32(&resolveCount), int32(10); a != e {
  2464  			return fmt.Errorf("expected %d; got %d resolves", e, a)
  2465  		}
  2466  		return nil
  2467  	})
  2468  }
  2469  
  2470  // TestStoreBadRequests verifies that Send returns errors for
  2471  // bad requests that do not pass key verification.
  2472  func TestStoreBadRequests(t *testing.T) {
  2473  	defer leaktest.AfterTest(t)()
  2474  	stopper := stop.NewStopper()
  2475  	defer stopper.Stop(context.Background())
  2476  	store, _ := createTestStore(t, testStoreOpts{createSystemRanges: true}, stopper)
  2477  
  2478  	txn := newTransaction("test", roachpb.Key("a"), 1 /* priority */, store.cfg.Clock)
  2479  
  2480  	args1 := getArgs(roachpb.Key("a"))
  2481  	args1.EndKey = roachpb.Key("b")
  2482  
  2483  	args2 := getArgs(roachpb.RKeyMax)
  2484  
  2485  	args3 := scanArgs(roachpb.Key("a"), roachpb.Key("a"))
  2486  	args4 := scanArgs(roachpb.Key("b"), roachpb.Key("a"))
  2487  
  2488  	args5 := scanArgs(roachpb.RKeyMin, roachpb.Key("a"))
  2489  	args6 := scanArgs(keys.RangeDescriptorKey(roachpb.RKey(keys.MinKey)), roachpb.Key("a"))
  2490  
  2491  	tArgs0, _ := heartbeatArgs(txn, hlc.Timestamp{})
  2492  
  2493  	tArgs2, tHeader2 := endTxnArgs(txn, false /* commit */)
  2494  	tHeader2.Txn.Key = roachpb.Key(tHeader2.Txn.Key).Next()
  2495  
  2496  	tArgs3, tHeader3 := heartbeatArgs(txn, hlc.Timestamp{})
  2497  	tHeader3.Txn.Key = roachpb.Key(tHeader3.Txn.Key).Next()
  2498  
  2499  	tArgs4 := pushTxnArgs(txn, txn, roachpb.PUSH_ABORT)
  2500  	tArgs4.PusheeTxn.Key = roachpb.Key(txn.Key).Next()
  2501  
  2502  	testCases := []struct {
  2503  		args   roachpb.Request
  2504  		header *roachpb.Header
  2505  		err    string
  2506  	}{
  2507  		// EndKey for non-Range is invalid.
  2508  		{&args1, nil, "should not be specified"},
  2509  		// Start key must be less than KeyMax.
  2510  		{&args2, nil, "must be less than"},
  2511  		// End key must be greater than start.
  2512  		{args3, nil, "must be greater than"},
  2513  		{args4, nil, "must be greater than"},
  2514  		// Can't range from local to global.
  2515  		{args5, nil, "must be greater than LocalMax"},
  2516  		{args6, nil, "is range-local, but"},
  2517  		// Txn must be specified in Header.
  2518  		{&tArgs0, nil, "no transaction specified"},
  2519  		// Txn key must be same as the request key.
  2520  		{&tArgs2, &tHeader2, "request key .* should match txn key .*"},
  2521  		{&tArgs3, &tHeader3, "request key .* should match txn key .*"},
  2522  		{&tArgs4, nil, "request key .* should match pushee"},
  2523  	}
  2524  	for i, test := range testCases {
  2525  		t.Run("", func(t *testing.T) {
  2526  			if test.header == nil {
  2527  				test.header = &roachpb.Header{}
  2528  			}
  2529  			if test.header.Txn != nil {
  2530  				assignSeqNumsForReqs(test.header.Txn, test.args)
  2531  			}
  2532  			if _, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), *test.header, test.args); !testutils.IsPError(pErr, test.err) {
  2533  				t.Errorf("%d expected error %q, got error %v", i, test.err, pErr)
  2534  			}
  2535  		})
  2536  	}
  2537  }
  2538  
  2539  // fakeRangeQueue implements the rangeQueue interface and
  2540  // records which range is passed to MaybeRemove.
  2541  type fakeRangeQueue struct {
  2542  	maybeRemovedRngs chan roachpb.RangeID
  2543  }
  2544  
  2545  func (fq *fakeRangeQueue) Start(_ *stop.Stopper) {
  2546  	// Do nothing
  2547  }
  2548  
  2549  func (fq *fakeRangeQueue) MaybeAddAsync(context.Context, replicaInQueue, hlc.Timestamp) {
  2550  	// Do nothing
  2551  }
  2552  
  2553  func (fq *fakeRangeQueue) MaybeRemove(rangeID roachpb.RangeID) {
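	// The channel is unbuffered, so this blocks until the test reads
	// the removed range's ID.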
  2554  	fq.maybeRemovedRngs <- rangeID
  2555  }
  2556  
  2557  func (fq *fakeRangeQueue) Name() string {
  2558  	return "fakeRangeQueue"
  2559  }
  2560  
  2561  func (fq *fakeRangeQueue) NeedsLease() bool {
  2562  	return false
  2563  }
  2564  
  2565  // TestMaybeRemove tests that MaybeRemove is called when a range is removed.
  2566  func TestMaybeRemove(t *testing.T) {
  2567  	defer leaktest.AfterTest(t)()
  2568  	cfg := TestStoreConfig(nil)
  2569  	stopper := stop.NewStopper()
  2570  	defer stopper.Stop(context.Background())
  2571  	store := createTestStoreWithoutStart(t, stopper, testStoreOpts{createSystemRanges: true}, &cfg)
  2572  
	// Add a queue to the scanner before starting the store and running the
	// scanner. This is necessary to avoid a data race.
  2575  	fq := &fakeRangeQueue{
  2576  		maybeRemovedRngs: make(chan roachpb.RangeID),
  2577  	}
  2578  	store.scanner.AddQueues(fq)
  2579  
  2580  	if err := store.Start(context.Background(), stopper); err != nil {
  2581  		t.Fatal(err)
  2582  	}
  2583  	store.WaitForInit()
  2584  
  2585  	repl, err := store.GetReplica(1)
  2586  	if err != nil {
  2587  		t.Error(err)
  2588  	}
  2589  	if err := store.RemoveReplica(context.Background(), repl, repl.Desc().NextReplicaID, RemoveOptions{
  2590  		DestroyData: true,
  2591  	}); err != nil {
  2592  		t.Error(err)
  2593  	}
	// Verify MaybeRemove was called with the removed range's ID.
  2595  	removedRng := <-fq.maybeRemovedRngs
  2596  	if removedRng != repl.RangeID {
  2597  		t.Errorf("Unexpected removed range %v", removedRng)
  2598  	}
  2599  }
  2600  
  2601  func TestStoreGCThreshold(t *testing.T) {
  2602  	defer leaktest.AfterTest(t)()
  2603  	tc := testContext{}
  2604  	stopper := stop.NewStopper()
  2605  	defer stopper.Stop(context.Background())
  2606  	tc.Start(t, stopper)
  2607  	store := tc.store
  2608  
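	// assertThreshold checks that the replica's in-memory GC threshold
	// matches the persisted one and that both equal ts.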
  2609  	assertThreshold := func(ts hlc.Timestamp) {
  2610  		repl, err := store.GetReplica(1)
  2611  		if err != nil {
  2612  			t.Fatal(err)
  2613  		}
  2614  		repl.mu.Lock()
  2615  		gcThreshold := *repl.mu.state.GCThreshold
  2616  		pgcThreshold, err := repl.mu.stateLoader.LoadGCThreshold(context.Background(), store.Engine())
  2617  		repl.mu.Unlock()
  2618  		if err != nil {
  2619  			t.Fatal(err)
  2620  		}
  2621  		if gcThreshold != *pgcThreshold {
  2622  			t.Fatalf("persisted != in-memory threshold: %s vs %s", pgcThreshold, gcThreshold)
  2623  		}
  2624  		if *pgcThreshold != ts {
  2625  			t.Fatalf("expected timestamp %s, got %s", ts, pgcThreshold)
  2626  		}
  2627  	}
  2628  
  2629  	// Threshold should start at zero.
  2630  	assertThreshold(hlc.Timestamp{})
  2631  
  2632  	threshold := hlc.Timestamp{
  2633  		WallTime: 2e9,
  2634  	}
  2635  
  2636  	gcr := roachpb.GCRequest{
  2637  		// Bogus span to make it a valid request.
  2638  		RequestHeader: roachpb.RequestHeader{
  2639  			Key:    roachpb.Key("a"),
  2640  			EndKey: roachpb.Key("b"),
  2641  		},
  2642  		Threshold: threshold,
  2643  	}
  2644  	if _, pErr := tc.SendWrappedWith(roachpb.Header{RangeID: 1}, &gcr); pErr != nil {
  2645  		t.Fatal(pErr)
  2646  	}
  2647  
  2648  	assertThreshold(threshold)
  2649  }
  2650  
  2651  // TestRaceOnTryGetOrCreateReplicas exercises a case where a race between
  2652  // different raft messages addressed to different replica IDs could lead to
  2653  // a nil pointer panic.
  2654  func TestRaceOnTryGetOrCreateReplicas(t *testing.T) {
  2655  	defer leaktest.AfterTest(t)()
  2656  	tc := testContext{}
  2657  	stopper := stop.NewStopper()
  2658  	ctx := context.Background()
  2659  	defer stopper.Stop(ctx)
  2660  	tc.Start(t, stopper)
  2661  	s := tc.store
  2662  	var wg sync.WaitGroup
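        	// Race many goroutines that each try to create a replica of range 42
        	// under a distinct replica ID. On success, getOrCreateReplica returns
        	// with the replica's raftMu held, so it must be unlocked here.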
  2663  	for i := 3; i < 100; i++ {
  2664  		wg.Add(1)
  2665  		go func(rid roachpb.ReplicaID) {
  2666  			defer wg.Done()
  2667  			r, _, _ := s.getOrCreateReplica(ctx, 42, rid, &roachpb.ReplicaDescriptor{
  2668  				NodeID:    2,
  2669  				StoreID:   2,
  2670  				ReplicaID: 2,
  2671  			}, false)
  2672  			if r != nil {
  2673  				r.raftMu.Unlock()
  2674  			}
  2675  		}(roachpb.ReplicaID(i))
  2676  	}
  2677  	wg.Wait()
  2678  }
  2679  
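        // TestStoreRangePlaceholders exercises the store's replica placeholder
        // bookkeeping: insertion, removal, double-insert and double-remove
        // checks, and overlap checks against existing replicas.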
  2680  func TestStoreRangePlaceholders(t *testing.T) {
  2681  	defer leaktest.AfterTest(t)()
  2682  	tc := testContext{}
  2683  	stopper := stop.NewStopper()
  2684  	ctx := context.Background()
  2685  	defer stopper.Stop(ctx)
  2686  	tc.Start(t, stopper)
  2687  	s := tc.store
  2688  
  2689  	s.mu.Lock()
  2690  	numPlaceholders := len(s.mu.replicaPlaceholders)
  2691  	s.mu.Unlock()
  2692  
  2693  	if numPlaceholders != 0 {
  2694  		t.Fatal("new store should have zero replica placeholders")
  2695  	}
  2696  
  2697  	// Clobber the existing range so we can test non-overlapping placeholders.
  2698  	repl1, err := s.GetReplica(1)
  2699  	if err != nil {
  2700  		t.Fatal(err)
  2701  	}
  2702  	if err := s.RemoveReplica(ctx, repl1, repl1.Desc().NextReplicaID, RemoveOptions{
  2703  		DestroyData: true,
  2704  	}); err != nil {
  2705  		t.Fatal(err)
  2706  	}
  2707  
  2708  	repID := roachpb.RangeID(2)
  2709  	rep := createReplica(s, repID, roachpb.RKeyMin, roachpb.RKey("c"))
  2710  	if err := s.AddReplica(rep); err != nil {
  2711  		t.Fatal(err)
  2712  	}
  2713  
  2714  	placeholder1 := &ReplicaPlaceholder{
  2715  		rangeDesc: roachpb.RangeDescriptor{
  2716  			RangeID:  roachpb.RangeID(7),
  2717  			StartKey: roachpb.RKey("c"),
  2718  			EndKey:   roachpb.RKey("d"),
  2719  		},
  2720  	}
  2721  	placeholder2 := &ReplicaPlaceholder{
  2722  		rangeDesc: roachpb.RangeDescriptor{
  2723  			RangeID:  roachpb.RangeID(8),
  2724  			StartKey: roachpb.RKey("d"),
  2725  			EndKey:   roachpb.RKeyMax,
  2726  		},
  2727  	}
  2728  
  2729  	s.mu.Lock()
  2730  	defer s.mu.Unlock()
  2731  
  2732  	// Test that simple insertion works.
  2733  	if err := s.addPlaceholderLocked(placeholder1); err != nil {
  2734  		t.Fatalf("could not add placeholder to empty store, got %s", err)
  2735  	}
  2736  	if err := s.addPlaceholderLocked(placeholder2); err != nil {
  2737  		t.Fatalf("could not add non-overlapping placeholder, got %s", err)
  2738  	}
  2739  
  2740  	// Test that simple deletion works.
  2741  	if !s.removePlaceholderLocked(ctx, placeholder1.rangeDesc.RangeID) {
  2742  		t.Fatalf("could not remove placeholder that was present")
  2743  	}
  2744  
  2745  	// Test that the same placeholder cannot be inserted twice.
  2746  	if err := s.addPlaceholderLocked(placeholder1); err != nil {
  2747  		t.Fatalf("could not re-add placeholder after removal, got %s", err)
  2748  	}
  2749  	if err := s.addPlaceholderLocked(placeholder1); !testutils.IsError(err, ".*overlaps with existing KeyRange") {
  2750  		t.Fatalf("should not be able to add ReplicaPlaceholder for the same key twice, got: %+v", err)
  2751  	}
  2752  
  2753  	// Test that a placeholder cannot be removed twice.
  2754  	if !s.removePlaceholderLocked(ctx, placeholder1.rangeDesc.RangeID) {
  2755  		t.Fatalf("could not remove placeholder that was present")
  2756  	}
  2757  	if s.removePlaceholderLocked(ctx, placeholder1.rangeDesc.RangeID) {
  2758  		t.Fatalf("successfully removed placeholder that was not present")
  2759  	}
  2760  
  2761  	// This placeholder overlaps with an existing replica.
  2762  	placeholder1 = &ReplicaPlaceholder{
  2763  		rangeDesc: roachpb.RangeDescriptor{
  2764  			RangeID:  repID,
  2765  			StartKey: roachpb.RKeyMin,
  2766  			EndKey:   roachpb.RKey("c"),
  2767  		},
  2768  	}
  2769  
  2770  	// Test that a placeholder cannot clobber an existing replica.
  2771  	if err := s.addPlaceholderLocked(placeholder1); !testutils.IsError(err, ".*overlaps with existing KeyRange") {
  2772  		t.Fatalf("should not be able to add ReplicaPlaceholder when Replica already exists, got: %+v", err)
  2773  	}
  2774  
  2775  	// Test that placeholder removal doesn't delete replicas.
  2776  	if s.removePlaceholderLocked(ctx, repID) {
  2777  		t.Fatalf("should not be able to process removeReplicaPlaceholder for a RangeID where a Replica exists")
  2778  	}
  2779  }
  2780  
  2781  // Test that we remove snapshot placeholders when raft ignores the
  2782  // snapshot. This tests the removal of the placeholder after handleRaftReady
  2783  // processing for an uninitialized Replica.
  2784  func TestStoreRemovePlaceholderOnRaftIgnored(t *testing.T) {
  2785  	defer leaktest.AfterTest(t)()
  2786  	tc := testContext{}
  2787  	stopper := stop.NewStopper()
  2788  	defer stopper.Stop(context.Background())
  2789  	tc.Start(t, stopper)
  2790  	s := tc.store
  2791  	ctx := context.Background()
  2792  
  2793  	// Clobber the existing range and recreate it with an uninitialized
  2794  	// descriptor so we can test non-overlapping placeholders.
  2795  	repl1, err := s.GetReplica(1)
  2796  	if err != nil {
  2797  		t.Fatal(err)
  2798  	}
  2799  	if err := s.RemoveReplica(context.Background(), repl1, repl1.Desc().NextReplicaID, RemoveOptions{
  2800  		DestroyData: true,
  2801  	}); err != nil {
  2802  		t.Fatal(err)
  2803  	}
  2804  
  2805  	uninitDesc := roachpb.RangeDescriptor{RangeID: repl1.Desc().RangeID}
  2806  	if _, err := stateloader.WriteInitialState(
  2807  		ctx, s.Engine(), enginepb.MVCCStats{}, uninitDesc, roachpb.Lease{},
  2808  		hlc.Timestamp{}, stateloader.TruncatedStateUnreplicated,
  2809  	); err != nil {
  2810  		t.Fatal(err)
  2811  	}
  2812  	uninitRepl1, err := newReplica(ctx, &uninitDesc, s, 2)
  2813  	if err != nil {
  2814  		t.Fatal(err)
  2815  	}
  2816  	if err := s.addReplicaToRangeMapLocked(uninitRepl1); err != nil {
  2817  		t.Fatal(err)
  2818  	}
  2819  
  2820  	// Generate a minimal fake snapshot.
  2821  	snapData := &roachpb.RaftSnapshotData{}
  2822  	data, err := protoutil.Marshal(snapData)
  2823  	if err != nil {
  2824  		t.Fatal(err)
  2825  	}
  2826  
  2827  	// Wrap the snapshot in a minimal header. The request will be dropped
  2828  	// because the Raft log index and term are less than the hard state written
  2829  	// above.
  2830  	req := &SnapshotRequest_Header{
  2831  		State: kvserverpb.ReplicaState{Desc: repl1.Desc()},
  2832  		RaftMessageRequest: RaftMessageRequest{
  2833  			RangeID: 1,
  2834  			ToReplica: roachpb.ReplicaDescriptor{
  2835  				NodeID:    1,
  2836  				StoreID:   1,
  2837  				ReplicaID: 2,
  2838  			},
  2839  			FromReplica: roachpb.ReplicaDescriptor{
  2840  				NodeID:    2,
  2841  				StoreID:   2,
  2842  				ReplicaID: 3,
  2843  			},
  2844  			Message: raftpb.Message{
  2845  				Type: raftpb.MsgSnap,
  2846  				Snapshot: raftpb.Snapshot{
  2847  					Data: data,
  2848  					Metadata: raftpb.SnapshotMetadata{
  2849  						Index: 1,
  2850  						Term:  1,
  2851  					},
  2852  				},
  2853  			},
  2854  		},
  2855  	}
  2856  	if err := s.processRaftSnapshotRequest(ctx, req,
  2857  		IncomingSnapshot{
  2858  			SnapUUID: uuid.MakeV4(),
  2859  			State:    &kvserverpb.ReplicaState{Desc: repl1.Desc()},
  2860  		}); err != nil {
  2861  		t.Fatal(err)
  2862  	}
  2863  
  2864  	testutils.SucceedsSoon(t, func() error {
  2865  		s.mu.Lock()
  2866  		numPlaceholders := len(s.mu.replicaPlaceholders)
  2867  		s.mu.Unlock()
  2868  
  2869  		if numPlaceholders != 0 {
  2870  			return errors.Errorf("expected 0 placeholders, but found %d", numPlaceholders)
  2871  		}
  2872  		// The count of dropped placeholders is incremented after the placeholder
  2873  		// is removed (and while not holding Store.mu), so the dropped-placeholder
  2874  		// count must also be checked inside this retry loop.
  2875  		if n := atomic.LoadInt32(&s.counts.droppedPlaceholders); n != 1 {
  2876  			return errors.Errorf("expected 1 dropped placeholder, but found %d", n)
  2877  		}
  2878  		return nil
  2879  	})
  2880  }
  2881  
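        // fakeSnapshotStream is a canned snapshot stream: Recv returns the
        // configured response and error, and Send discards the request.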
  2882  type fakeSnapshotStream struct {
  2883  	nextResp *SnapshotResponse
  2884  	nextErr  error
  2885  }
  2886  
  2887  func (c fakeSnapshotStream) Recv() (*SnapshotResponse, error) {
  2888  	return c.nextResp, c.nextErr
  2889  }
  2890  
  2891  func (c fakeSnapshotStream) Send(request *SnapshotRequest) error {
  2892  	return nil
  2893  }
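
        // Compile-time interface check; a sketch assuming sendSnapshot accepts
        // its stream as an outgoingSnapshotStream with exactly the Send/Recv
        // shape implemented above.
        var _ outgoingSnapshotStream = fakeSnapshotStream{}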
  2894  
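        // fakeStorePool records throttle calls by reason, standing in for the
        // real StorePool in snapshot-sending tests.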
  2895  type fakeStorePool struct {
  2896  	declinedThrottles int
  2897  	failedThrottles   int
  2898  }
  2899  
  2900  func (sp *fakeStorePool) throttle(reason throttleReason, why string, toStoreID roachpb.StoreID) {
  2901  	switch reason {
  2902  	case throttleDeclined:
  2903  		sp.declinedThrottles++
  2904  	case throttleFailed:
  2905  		sp.failedThrottles++
  2906  	}
  2907  }
  2908  
  2909  // TestSendSnapshotThrottling tests the store pool throttling behavior of
  2910  // store.sendSnapshot, ensuring that it properly updates the StorePool on
  2911  // various exceptional conditions and new capacity estimates.
  2912  func TestSendSnapshotThrottling(t *testing.T) {
  2913  	defer leaktest.AfterTest(t)()
  2914  	e := storage.NewDefaultInMem()
  2915  	defer e.Close()
  2916  
  2917  	ctx := context.Background()
  2918  	var cfg base.RaftConfig
  2919  	cfg.SetDefaults()
  2920  	st := cluster.MakeTestingClusterSettings()
  2921  
  2922  	header := SnapshotRequest_Header{
  2923  		CanDecline: true,
  2924  		State: kvserverpb.ReplicaState{
  2925  			Desc: &roachpb.RangeDescriptor{RangeID: 1},
  2926  		},
  2927  	}
  2928  	newBatch := e.NewBatch
  2929  
  2930  	// Test that a failed Recv() causes a fail throttle.
  2931  	{
  2932  		sp := &fakeStorePool{}
  2933  		expectedErr := errors.New("")
  2934  		c := fakeSnapshotStream{nil, expectedErr}
  2935  		err := sendSnapshot(ctx, &cfg, st, c, sp, header, nil, newBatch, nil)
  2936  		if sp.failedThrottles != 1 {
  2937  			t.Fatalf("expected 1 failed throttle, but found %d", sp.failedThrottles)
  2938  		}
  2939  		if !errors.Is(err, expectedErr) {
  2940  			t.Fatalf("expected error %s, but found %s", expectedErr, err)
  2941  		}
  2942  	}
  2943  
  2944  	// Test that a declined snapshot causes a decline throttle.
  2945  	{
  2946  		sp := &fakeStorePool{}
  2947  		resp := &SnapshotResponse{
  2948  			Status: SnapshotResponse_DECLINED,
  2949  		}
  2950  		c := fakeSnapshotStream{resp, nil}
  2951  		err := sendSnapshot(ctx, &cfg, st, c, sp, header, nil, newBatch, nil)
  2952  		if sp.declinedThrottles != 1 {
  2953  			t.Fatalf("expected 1 declined throttle, but found %d", sp.declinedThrottles)
  2954  		}
  2955  		if err == nil {
  2956  			t.Fatalf("expected error, found nil")
  2957  		}
  2958  	}
  2959  
  2960  	// Test that a declined but required snapshot causes a fail throttle.
  2961  	{
  2962  		sp := &fakeStorePool{}
  2963  		header.CanDecline = false
  2964  		resp := &SnapshotResponse{
  2965  			Status: SnapshotResponse_DECLINED,
  2966  		}
  2967  		c := fakeSnapshotStream{resp, nil}
  2968  		err := sendSnapshot(ctx, &cfg, st, c, sp, header, nil, newBatch, nil)
  2969  		if sp.failedThrottles != 1 {
  2970  			t.Fatalf("expected 1 failed throttle, but found %d", sp.failedThrottles)
  2971  		}
  2972  		if err == nil {
  2973  			t.Fatalf("expected error, found nil")
  2974  		}
  2975  	}
  2976  
  2977  	// Test that an errored snapshot causes a fail throttle.
  2978  	{
  2979  		sp := &fakeStorePool{}
  2980  		resp := &SnapshotResponse{
  2981  			Status: SnapshotResponse_ERROR,
  2982  		}
  2983  		c := fakeSnapshotStream{resp, nil}
  2984  		err := sendSnapshot(ctx, &cfg, st, c, sp, header, nil, newBatch, nil)
  2985  		if sp.failedThrottles != 1 {
  2986  			t.Fatalf("expected 1 failed throttle, but found %d", sp.failedThrottles)
  2987  		}
  2988  		if err == nil {
  2989  			t.Fatalf("expected error, found nil")
  2990  		}
  2991  	}
  2992  }
  2993  
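        // TestReserveSnapshotThrottling verifies the semaphore behavior of
        // reserveSnapshot: empty snapshots pass through without a reservation,
        // declinable snapshots are rejected while another reservation is held,
        // and non-declinable snapshots block until the reservation is released.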
  2994  func TestReserveSnapshotThrottling(t *testing.T) {
  2995  	defer leaktest.AfterTest(t)()
  2996  
  2997  	stopper := stop.NewStopper()
  2998  	defer stopper.Stop(context.Background())
  2999  	tc := testContext{}
  3000  	tc.Start(t, stopper)
  3001  	s := tc.store
  3002  
  3003  	ctx := context.Background()
  3004  
  3005  	cleanupNonEmpty1, rejectionMsg, err := s.reserveSnapshot(ctx, &SnapshotRequest_Header{
  3006  		RangeSize: 1,
  3007  	})
  3008  	if err != nil {
  3009  		t.Fatal(err)
  3010  	}
  3011  	if rejectionMsg != "" {
  3012  		t.Fatalf("expected no rejection message, got %q", rejectionMsg)
  3013  	}
  3014  	if n := s.ReservationCount(); n != 1 {
  3015  		t.Fatalf("expected 1 reservation, but found %d", n)
  3016  	}
  3017  
  3018  	// Ensure we allow a concurrent empty snapshot.
  3019  	cleanupEmpty, rejectionMsg, err := s.reserveSnapshot(ctx, &SnapshotRequest_Header{})
  3020  	if err != nil {
  3021  		t.Fatal(err)
  3022  	}
  3023  	if rejectionMsg != "" {
  3024  		t.Fatalf("expected no rejection message, got %q", rejectionMsg)
  3025  	}
  3026  	// Empty snapshots are not throttled and so do not increase the reservation
  3027  	// count.
  3028  	if n := s.ReservationCount(); n != 1 {
  3029  		t.Fatalf("expected 1 reservation, but found %d", n)
  3030  	}
  3031  	cleanupEmpty()
  3032  
  3033  	// Verify that a declinable snapshot will be declined if another is in
  3034  	// progress.
  3035  	cleanupNonEmpty2, rejectionMsg, err := s.reserveSnapshot(ctx, &SnapshotRequest_Header{
  3036  		RangeSize:  1,
  3037  		CanDecline: true,
  3038  	})
  3039  	if err != nil {
  3040  		t.Fatal(err)
  3041  	}
  3042  	if rejectionMsg != snapshotApplySemBusyMsg {
  3043  		t.Fatalf("expected rejection message %q, got %q", snapshotApplySemBusyMsg, rejectionMsg)
  3044  	}
  3045  	if cleanupNonEmpty2 != nil {
  3046  		t.Fatalf("got unexpected non-nil cleanup method")
  3047  	}
  3048  	if n := s.ReservationCount(); n != 1 {
  3049  		t.Fatalf("expected 1 reservation, but found %d", n)
  3050  	}
  3051  
  3052  	// Verify that we block concurrent snapshots by spawning a goroutine that
  3053  	// executes the cleanup after a short delay, but only if another snapshot
  3054  	// was not allowed through in the meantime.
  3055  	var boom int32
  3056  	go func() {
  3057  		time.Sleep(20 * time.Millisecond)
  3058  		if atomic.LoadInt32(&boom) == 0 {
  3059  			cleanupNonEmpty1()
  3060  		}
  3061  	}()
  3062  
  3063  	cleanupNonEmpty3, rejectionMsg, err := s.reserveSnapshot(ctx, &SnapshotRequest_Header{
  3064  		RangeSize: 1,
  3065  	})
  3066  	if err != nil {
  3067  		t.Fatal(err)
  3068  	}
  3069  	if rejectionMsg != "" {
  3070  		t.Fatalf("expected no rejection message, got %q", rejectionMsg)
  3071  	}
  3072  	atomic.StoreInt32(&boom, 1)
  3073  	cleanupNonEmpty3()
  3074  
  3075  	if n := s.ReservationCount(); n != 0 {
  3076  		t.Fatalf("expected 0 reservations, but found %d", n)
  3077  	}
  3078  }
  3079  
  3080  // TestReserveSnapshotFullnessLimit verifies that snapshots are rejected when
  3081  // the recipient store's disk is near full.
  3082  func TestReserveSnapshotFullnessLimit(t *testing.T) {
  3083  	defer leaktest.AfterTest(t)()
  3084  
  3085  	stopper := stop.NewStopper()
  3086  	defer stopper.Stop(context.Background())
  3087  	tc := testContext{}
  3088  	tc.Start(t, stopper)
  3089  	s := tc.store
  3090  
  3091  	ctx := context.Background()
  3092  
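        	// Pretend the store is nearly full by injecting a doctored capacity
        	// descriptor into the StorePool: one available byte, the rest in use.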
  3093  	desc, err := s.Descriptor(false /* useCached */)
  3094  	if err != nil {
  3095  		t.Fatal(err)
  3096  	}
  3097  	desc.Capacity.Available = 1
  3098  	desc.Capacity.Used = desc.Capacity.Capacity - desc.Capacity.Available
  3099  
  3100  	s.cfg.StorePool.detailsMu.Lock()
  3101  	s.cfg.StorePool.getStoreDetailLocked(desc.StoreID).desc = desc
  3102  	s.cfg.StorePool.detailsMu.Unlock()
  3103  
  3104  	// A declinable snapshot to a nearly full store should be rejected.
  3105  	cleanupRejected, rejectionMsg, err := s.reserveSnapshot(ctx, &SnapshotRequest_Header{
  3106  		RangeSize:  1,
  3107  		CanDecline: true,
  3108  	})
  3109  	if err != nil {
  3110  		t.Fatal(err)
  3111  	}
  3112  	if rejectionMsg != snapshotStoreTooFullMsg {
  3113  		t.Fatalf("expected rejection message %q, got %q", snapshotStoreTooFullMsg, rejectionMsg)
  3114  	}
  3115  	if cleanupRejected != nil {
  3116  		t.Fatalf("got unexpected non-nil cleanup method")
  3117  	}
  3118  	if n := s.ReservationCount(); n != 0 {
  3119  		t.Fatalf("expected 0 reservations, but found %d", n)
  3120  	}
  3121  
  3122  	// But a non-declinable snapshot should be allowed.
  3123  	cleanupAccepted, rejectionMsg, err := s.reserveSnapshot(ctx, &SnapshotRequest_Header{
  3124  		RangeSize:  1,
  3125  		CanDecline: false,
  3126  	})
  3127  	if err != nil {
  3128  		t.Fatal(err)
  3129  	}
  3130  	if rejectionMsg != "" {
  3131  		t.Fatalf("expected no rejection message, got %q", rejectionMsg)
  3132  	}
  3133  	if n := s.ReservationCount(); n != 1 {
  3134  		t.Fatalf("expected 1 reservation, but found %d", n)
  3135  	}
  3136  	cleanupAccepted()
  3137  
  3138  	// Even if the store isn't mostly full, a range that's larger than the
  3139  	// available disk space should be rejected.
  3140  	desc.Capacity.Available = desc.Capacity.Capacity / 2
  3141  	desc.Capacity.Used = desc.Capacity.Capacity - desc.Capacity.Available
  3142  	s.cfg.StorePool.detailsMu.Lock()
  3143  	s.cfg.StorePool.getStoreDetailLocked(desc.StoreID).desc = desc
  3144  	s.cfg.StorePool.detailsMu.Unlock()
  3145  
  3146  	// A declinable snapshot larger than the available space should be rejected.
  3147  	cleanupRejected2, rejectionMsg, err := s.reserveSnapshot(ctx, &SnapshotRequest_Header{
  3148  		RangeSize:  desc.Capacity.Available + 1,
  3149  		CanDecline: true,
  3150  	})
  3151  	if err != nil {
  3152  		t.Fatal(err)
  3153  	}
  3154  	if rejectionMsg != snapshotStoreTooFullMsg {
  3155  		t.Fatalf("expected rejection message %q, got %q", snapshotStoreTooFullMsg, rejectionMsg)
  3156  	}
  3157  	if cleanupRejected2 != nil {
  3158  		t.Fatalf("got unexpected non-nil cleanup method")
  3159  	}
  3160  	if n := s.ReservationCount(); n != 0 {
  3161  		t.Fatalf("expected 0 reservations, but found %d", n)
  3162  	}
  3163  }
  3164  
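        // TestSnapshotRateLimit verifies the rate limit that snapshotRateLimit
        // derives from each snapshot priority, and that an unknown priority is
        // rejected with an error.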
  3165  func TestSnapshotRateLimit(t *testing.T) {
  3166  	defer leaktest.AfterTest(t)()
  3167  
  3168  	testCases := []struct {
  3169  		priority      SnapshotRequest_Priority
  3170  		expectedLimit rate.Limit
  3171  		expectedErr   string
  3172  	}{
  3173  		{SnapshotRequest_UNKNOWN, 0, "unknown snapshot priority"},
  3174  		{SnapshotRequest_RECOVERY, 8 << 20, ""},
  3175  		{SnapshotRequest_REBALANCE, 8 << 20, ""},
  3176  	}
  3177  	for _, c := range testCases {
  3178  		t.Run(c.priority.String(), func(t *testing.T) {
  3179  			limit, err := snapshotRateLimit(cluster.MakeTestingClusterSettings(), c.priority)
  3180  			if !testutils.IsError(err, c.expectedErr) {
  3181  				t.Fatalf("expected \"%s\", but found %v", c.expectedErr, err)
  3182  			}
  3183  			if c.expectedLimit != limit {
  3184  				t.Fatalf("expected %v, but found %v", c.expectedLimit, limit)
  3185  			}
  3186  		})
  3187  	}
  3188  }
  3189  
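        // BenchmarkStoreGetReplica measures concurrent lookups of a single
        // replica in the store's replica map.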
  3190  func BenchmarkStoreGetReplica(b *testing.B) {
  3191  	stopper := stop.NewStopper()
  3192  	defer stopper.Stop(context.Background())
  3193  	store, _ := createTestStore(b, testStoreOpts{createSystemRanges: true}, stopper)
  3194  
  3195  	b.RunParallel(func(pb *testing.PB) {
  3196  		for pb.Next() {
  3197  			_, err := store.GetReplica(1)
  3198  			if err != nil {
  3199  				b.Fatal(err)
  3200  			}
  3201  		}
  3202  	})
  3203  }