github.com/cockroachdb/cockroach@v20.2.0-alpha.1+incompatible/pkg/kv/kvserver/consistency_queue_test.go

// Copyright 2016 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

package kvserver_test

import (
	"bytes"
	"context"
	"fmt"
	"io/ioutil"
	"math/rand"
	"path/filepath"
	"testing"
	"time"

	"github.com/cockroachdb/cockroach/pkg/base"
	"github.com/cockroachdb/cockroach/pkg/config"
	"github.com/cockroachdb/cockroach/pkg/kv"
	"github.com/cockroachdb/cockroach/pkg/kv/kvserver"
	"github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverbase"
	"github.com/cockroachdb/cockroach/pkg/kv/kvserver/stateloader"
	"github.com/cockroachdb/cockroach/pkg/roachpb"
	"github.com/cockroachdb/cockroach/pkg/storage"
	"github.com/cockroachdb/cockroach/pkg/storage/enginepb"
	"github.com/cockroachdb/cockroach/pkg/testutils"
	"github.com/cockroachdb/cockroach/pkg/testutils/testcluster"
	"github.com/cockroachdb/cockroach/pkg/util/hlc"
	"github.com/cockroachdb/cockroach/pkg/util/leaktest"
	"github.com/cockroachdb/cockroach/pkg/util/syncutil"
	"github.com/cockroachdb/cockroach/pkg/util/uuid"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// TestConsistencyQueueRequiresLive verifies the queue will not
// process ranges whose replicas are not all live.
func TestConsistencyQueueRequiresLive(t *testing.T) {
	defer leaktest.AfterTest(t)()
	sc := kvserver.TestStoreConfig(nil)
	sc.Clock = nil // manual clock
	mtc := &multiTestContext{storeConfig: &sc}
	defer mtc.Stop()
	mtc.Start(t, 3)

	// Replicate the range to three nodes.
	repl := mtc.stores[0].LookupReplica(roachpb.RKeyMin)
	rangeID := repl.RangeID
	mtc.replicateRange(rangeID, 1, 2)

	// Verify that queueing is immediately possible.
	if shouldQ, priority := mtc.stores[0].ConsistencyQueueShouldQueue(
		context.Background(), mtc.clock().Now(), repl, config.NewSystemConfig(sc.DefaultZoneConfig)); !shouldQ {
		t.Fatalf("expected shouldQ true; got %t, %f", shouldQ, priority)
	}

	// Stop a node and expire leases.
	mtc.stopStore(2)
	mtc.advanceClock(context.Background())

	if shouldQ, priority := mtc.stores[0].ConsistencyQueueShouldQueue(
		context.Background(), mtc.clock().Now(), repl, config.NewSystemConfig(sc.DefaultZoneConfig)); shouldQ {
		t.Fatalf("expected shouldQ false; got %t, %f", shouldQ, priority)
	}
}

// TestCheckConsistencyMultiStore creates a node with three stores
// and three-way replication. A value is written, and a consistency
// check is run.
func TestCheckConsistencyMultiStore(t *testing.T) {
	defer leaktest.AfterTest(t)()

	const numStores = 3
	mtc := &multiTestContext{}
	defer mtc.Stop()
	mtc.Start(t, numStores)
	// Set up replication of range 1 on store 0 to stores 1 and 2.
	mtc.replicateRange(1, 1, 2)

	// Write something to the DB.
	pArgs := putArgs([]byte("a"), []byte("b"))
	if _, err := kv.SendWrapped(context.Background(), mtc.stores[0].TestSender(), pArgs); err != nil {
		t.Fatal(err)
	}

	// Run consistency check.
	checkArgs := roachpb.CheckConsistencyRequest{
		RequestHeader: roachpb.RequestHeader{
			// Span of keys that includes "a".
			Key:    []byte("a"),
			EndKey: []byte("aa"),
		},
	}
	if _, err := kv.SendWrappedWith(context.Background(), mtc.stores[0].TestSender(), roachpb.Header{
		Timestamp: mtc.stores[0].Clock().Now(),
	}, &checkArgs); err != nil {
		t.Fatal(err)
	}
}

// TestCheckConsistencyReplay verifies that two ComputeChecksum requests with
// the same checksum ID are not committed to the Raft log, even if DistSender
// retries the request.
func TestCheckConsistencyReplay(t *testing.T) {
	defer leaktest.AfterTest(t)()

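	// applyKey identifies a single application of a ComputeChecksum command:
	// the checksum ID paired with the store on which it applied.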
	type applyKey struct {
		checksumID uuid.UUID
		storeID    roachpb.StoreID
	}
	var state struct {
		syncutil.Mutex
		forcedRetry bool
		applies     map[applyKey]int
	}
	state.applies = map[applyKey]int{}

	var mtc *multiTestContext
	ctx := context.Background()
	storeCfg := kvserver.TestStoreConfig(nil /* clock */)

	// Arrange to count the number of times each checksum command applies to each
	// store.
	storeCfg.TestingKnobs.TestingApplyFilter = func(args kvserverbase.ApplyFilterArgs) (int, *roachpb.Error) {
		state.Lock()
		defer state.Unlock()
		if ccr := args.ComputeChecksum; ccr != nil {
			state.applies[applyKey{ccr.ChecksumID, args.StoreID}]++
		}
		return 0, nil
	}

	// Arrange to trigger a retry when a ComputeChecksum request arrives.
	storeCfg.TestingKnobs.TestingResponseFilter = func(
		ctx context.Context, ba roachpb.BatchRequest, br *roachpb.BatchResponse,
	) *roachpb.Error {
		state.Lock()
		defer state.Unlock()
		if ba.IsSingleComputeChecksumRequest() && !state.forcedRetry {
			state.forcedRetry = true
			return roachpb.NewError(roachpb.NewSendError("injected failure"))
		}
		return nil
	}

	mtc = &multiTestContext{storeConfig: &storeCfg}
	defer mtc.Stop()
	mtc.Start(t, 2)

	mtc.replicateRange(roachpb.RangeID(1), 1)

	checkArgs := roachpb.CheckConsistencyRequest{
		RequestHeader: roachpb.RequestHeader{
			Key:    []byte("a"),
			EndKey: []byte("b"),
		},
	}
	if _, err := kv.SendWrapped(ctx, mtc.Store(0).TestSender(), &checkArgs); err != nil {
		t.Fatal(err)
	}

	state.Lock()
	defer state.Unlock()
	for applyKey, count := range state.applies {
		if count != 1 {
			t.Errorf("checksum %s was applied %d times to s%d (expected once)",
				applyKey.checksumID, count, applyKey.storeID)
		}
	}
}

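// TestCheckConsistencyInconsistent exercises a CHECK_VIA_QUEUE consistency
// check over a range that is deliberately diverged: an extra key is written
// directly to the engine of one replica. The test verifies that the
// leaseholder reports a diff, the diverged store fatals, checkpoints are
// taken on all stores, and a preventive startup file is written on the
// diverged store.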
func TestCheckConsistencyInconsistent(t *testing.T) {
	defer leaktest.AfterTest(t)()

	sc := kvserver.TestStoreConfig(nil)
	sc.Clock = nil // manual clock
	mtc := &multiTestContext{
		storeConfig: &sc,
		// This test was written before the multiTestContext started creating many
		// system ranges at startup, and hasn't been updated to take that into
		// account.
		startWithSingleRange: true,
	}

	const numStores = 3

	dir, cleanup := testutils.TempDir(t)
	defer cleanup()
	cache := storage.NewRocksDBCache(1 << 20)
	defer cache.Release()

	// Use on-disk stores because we want to take a RocksDB checkpoint and be
	// able to find it.
	for i := 0; i < numStores; i++ {
		eng, err := storage.NewRocksDB(storage.RocksDBConfig{
			StorageConfig: base.StorageConfig{
				Dir: filepath.Join(dir, fmt.Sprintf("%d", i)),
			},
		}, cache)
		if err != nil {
			t.Fatal(err)
		}
		defer eng.Close()
		mtc.engines = append(mtc.engines, eng)
	}

	// s1 will report a diff with inconsistent key "e", and only s2 has that
	// write (s3 agrees with s1).
	diffKey := []byte("e")
	var diffTimestamp hlc.Timestamp
	notifyReportDiff := make(chan struct{}, 1)
	sc.TestingKnobs.ConsistencyTestingKnobs.BadChecksumReportDiff =
		func(s roachpb.StoreIdent, diff kvserver.ReplicaSnapshotDiffSlice) {
			if s != *mtc.Store(0).Ident {
				t.Errorf("BadChecksumReportDiff called from follower (StoreIdent = %v)", s)
				return
			}
			if len(diff) != 1 {
				t.Errorf("diff length = %d, diff = %v", len(diff), diff)
				return
			}
			d := diff[0]
			if d.LeaseHolder || !bytes.Equal(diffKey, d.Key) || diffTimestamp != d.Timestamp {
				t.Errorf("diff = %v", d)
			}

			diff[0].Timestamp.Logical = 987 // mock this out for a consistent string below

			act := diff.String()

			exp := `--- leaseholder
+++ follower
+0.000000123,987 "e"
+    ts:1970-01-01 00:00:00.000000123 +0000 UTC
+    value:"\x00\x00\x00\x00\x01T"
+    raw mvcc_key/value: 6500000000000000007b000003db0d 000000000154
`
			if act != exp {
				// We already logged the actual one above.
				t.Errorf("expected:\n%s\ngot:\n%s", exp, act)
			}

			notifyReportDiff <- struct{}{}
		}
	// s2 (index 1) will panic.
	notifyFatal := make(chan struct{}, 1)
	sc.TestingKnobs.ConsistencyTestingKnobs.OnBadChecksumFatal = func(s roachpb.StoreIdent) {
		if s != *mtc.Store(1).Ident {
			t.Errorf("OnBadChecksumFatal called from %v", s)
			return
		}
		notifyFatal <- struct{}{}
	}

	defer mtc.Stop()
	mtc.Start(t, numStores)
	// Set up replication of range 1 on store 0 to stores 1 and 2.
	mtc.replicateRange(1, 1, 2)

	// Write something to the DB.
	pArgs := putArgs([]byte("a"), []byte("b"))
	if _, err := kv.SendWrapped(context.Background(), mtc.stores[0].TestSender(), pArgs); err != nil {
		t.Fatal(err)
	}
	pArgs = putArgs([]byte("c"), []byte("d"))
	if _, err := kv.SendWrapped(context.Background(), mtc.stores[0].TestSender(), pArgs); err != nil {
		t.Fatal(err)
	}

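	// runCheck triggers a CHECK_VIA_QUEUE consistency check over the keys
	// written above and returns the response.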
	runCheck := func() *roachpb.CheckConsistencyResponse {
		checkArgs := roachpb.CheckConsistencyRequest{
			RequestHeader: roachpb.RequestHeader{
				// Span of keys that includes "a" and "c".
				Key:    []byte("a"),
				EndKey: []byte("z"),
			},
			Mode: roachpb.ChecksumMode_CHECK_VIA_QUEUE,
		}
		resp, err := kv.SendWrapped(context.Background(), mtc.stores[0].TestSender(), &checkArgs)
		if err != nil {
			t.Fatal(err)
		}
		return resp.(*roachpb.CheckConsistencyResponse)
	}

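	// checkpoints lists any engine checkpoint directories that the given
	// store has created in its auxiliary directory.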
	checkpoints := func(nodeIdx int) []string {
		pat := filepath.Join(mtc.engines[nodeIdx].GetAuxiliaryDir(), "checkpoints") + "/*"
		m, err := filepath.Glob(pat)
		assert.NoError(t, err)
		return m
	}

	// Run the check the first time; it shouldn't find anything.
	respOK := runCheck()
	assert.Len(t, respOK.Result, 1)
	assert.Equal(t, roachpb.CheckConsistencyResponse_RANGE_CONSISTENT, respOK.Result[0].Status)
	select {
	case <-notifyReportDiff:
		t.Fatal("unexpected diff")
	case <-notifyFatal:
		t.Fatal("unexpected panic")
	default:
	}

	// No checkpoints should have been created.
	for i := 0; i < numStores; i++ {
		assert.Empty(t, checkpoints(i))
	}

	// Write some arbitrary data only to store 1, making key "e" inconsistent.
	var val roachpb.Value
	val.SetInt(42)
	diffTimestamp = mtc.stores[1].Clock().Now()
	if err := storage.MVCCPut(
		context.Background(), mtc.stores[1].Engine(), nil, diffKey, diffTimestamp, val, nil,
	); err != nil {
		t.Fatal(err)
	}

	// Run the consistency check again; this time it should find something.
	resp := runCheck()

	select {
	case <-notifyReportDiff:
	case <-time.After(5 * time.Second):
		t.Fatal("CheckConsistency() failed to report a diff as expected")
	}
	select {
	case <-notifyFatal:
	case <-time.After(5 * time.Second):
		t.Fatal("CheckConsistency() failed to panic as expected")
	}

	// Checkpoints should have been created on all stores, and they should not
	// be empty.
	for i := 0; i < numStores; i++ {
		cps := checkpoints(i)
		assert.Len(t, cps, 1)
		cpEng, err := storage.NewRocksDB(storage.RocksDBConfig{
			StorageConfig: base.StorageConfig{
				Dir: cps[0],
			},
		}, cache)
		assert.NoError(t, err)
		defer cpEng.Close()

		iter := cpEng.NewIterator(storage.IterOptions{UpperBound: []byte("\xff")})
		defer iter.Close()

		ms, err := storage.ComputeStatsGo(iter, roachpb.KeyMin, roachpb.KeyMax, 0 /* nowNanos */)
		assert.NoError(t, err)

		assert.NotZero(t, ms.KeyBytes)
	}

	assert.Len(t, resp.Result, 1)
	assert.Equal(t, roachpb.CheckConsistencyResponse_RANGE_INCONSISTENT, resp.Result[0].Status)
	assert.Contains(t, resp.Result[0].Detail, `[minority]`)
	assert.Contains(t, resp.Result[0].Detail, `stats`)

	// A death rattle should have been written on s2 (store index 1).
	b, err := ioutil.ReadFile(base.PreventedStartupFile(mtc.stores[1].Engine().GetAuxiliaryDir()))
	require.NoError(t, err)
	require.NotEmpty(t, b)
}

// TestConsistencyQueueRecomputeStats is an end-to-end test of the mechanism CockroachDB
// employs to adjust incorrect MVCCStats ("incorrect" meaning not an inconsistency of
// these stats between replicas, but a delta between the persisted stats and those one
// would obtain by recomputing them from the on-disk state), namely a call to
// RecomputeStats triggered from the consistency checker (which also recomputes the stats).
//
// The test splits off a range on a single-node cluster backed by an on-disk RocksDB
// instance, and takes that node offline to perturb its stats. Next, it restarts the
// node as part of a cluster, upreplicates the range, and waits for the stats
// divergence to disappear.
//
// The upreplication here is immaterial and serves only to add realism to the test.
func TestConsistencyQueueRecomputeStats(t *testing.T) {
	defer leaktest.AfterTest(t)()
	testutils.RunTrueAndFalse(t, "hadEstimates", testConsistencyQueueRecomputeStatsImpl)
}

func testConsistencyQueueRecomputeStatsImpl(t *testing.T, hadEstimates bool) {
	ctx := context.Background()

	path, cleanup := testutils.TempDir(t)
	defer cleanup()

	// Set scanner timings that minimize waiting in this test.
	tsArgs := base.TestServerArgs{
		ScanInterval:    time.Second,
		ScanMinIdleTime: 0,
		ScanMaxIdleTime: 100 * time.Millisecond,
	}

	ccCh := make(chan roachpb.CheckConsistencyResponse, 1)
	knobs := &kvserver.StoreTestingKnobs{}
	knobs.ConsistencyTestingKnobs.ConsistencyQueueResultHook = func(resp roachpb.CheckConsistencyResponse) {
		if len(resp.Result) == 0 || resp.Result[0].Status != roachpb.CheckConsistencyResponse_RANGE_CONSISTENT_STATS_INCORRECT {
			// Ignore recomputations triggered by the time series ranges.
			return
		}
		select {
		case ccCh <- resp:
		default:
		}
	}
	tsArgs.Knobs.Store = knobs
	nodeZeroArgs := tsArgs
	nodeZeroArgs.StoreSpecs = []base.StoreSpec{{
		Path: path,
	}}

	clusterArgs := base.TestClusterArgs{
		ReplicationMode: base.ReplicationManual,
		ServerArgs:      tsArgs,
		ServerArgsPerNode: map[int]base.TestServerArgs{
			0: nodeZeroArgs,
		},
	}

	key := []byte("a")

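	// computeDelta issues a dry-run RecomputeStats request against the range
	// containing key and returns the stats delta that a real run would have
	// applied, aged to timestamp zero for comparability.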
	computeDelta := func(db *kv.DB) enginepb.MVCCStats {
		var b kv.Batch
		b.AddRawRequest(&roachpb.RecomputeStatsRequest{
			RequestHeader: roachpb.RequestHeader{Key: key},
			DryRun:        true,
		})
		if err := db.Run(ctx, &b); err != nil {
			t.Fatal(err)
		}
		resp := b.RawResponse().Responses[0].GetInner().(*roachpb.RecomputeStatsResponse)
		delta := enginepb.MVCCStats(resp.AddedDelta)
		delta.AgeTo(0)
		return delta
	}

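	// Start a single-node cluster just long enough to split off a range at
	// key and capture its ID; the cluster is stopped again so that the
	// store's on-disk state can be modified below.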
	rangeID := func() roachpb.RangeID {
		tc := testcluster.StartTestCluster(t, 1, clusterArgs)
		defer tc.Stopper().Stop(context.Background())

		db0 := tc.Servers[0].DB()

		// Split off a range so that we get away from the timeseries writes, which
		// pollute the stats with ContainsEstimates=true. Note that the split clears
		// the right-hand side (which is what we operate on) from that flag.
		if err := db0.AdminSplit(ctx, key, key, hlc.MaxTimestamp /* expirationTime */); err != nil {
			t.Fatal(err)
		}

		delta := computeDelta(db0)

		if delta != (enginepb.MVCCStats{}) {
			t.Fatalf("unexpected initial stats adjustment of %+v", delta)
		}

		rangeDesc, err := tc.LookupRange(key)
		if err != nil {
			t.Fatal(err)
		}

		return rangeDesc.RangeID
	}()

	const sysCountGarbage = 123000

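	// With the cluster stopped, open the node's store directly and skew the
	// persisted MVCCStats for the range.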
	func() {
		cache := storage.NewRocksDBCache(1 << 20)
		defer cache.Release()
		eng, err := storage.NewRocksDB(storage.RocksDBConfig{
			StorageConfig: base.StorageConfig{
				Dir:       path,
				MustExist: true,
			},
		}, cache)
		if err != nil {
			t.Fatal(err)
		}
		defer eng.Close()

		rsl := stateloader.Make(rangeID)
		ms, err := rsl.LoadMVCCStats(ctx, eng)
		if err != nil {
			t.Fatal(err)
		}

		// Put some garbage in the stats that we hope the consistency queue will
		// remove via RecomputeStats. SysCount was chosen because it is not
		// affected by the workload we run below and also does not influence the
		// GC queue score.
		ms.SysCount += sysCountGarbage
		ms.ContainsEstimates = 0
		if hadEstimates {
			ms.ContainsEstimates = 123
		}

		// Overwrite with the new stats; remember that this range hasn't been
		// upreplicated, so the consistency checker won't see any replica
		// divergence when it runs, but it should definitely see that its
		// recomputed stats don't match the persisted ones.
		if err := rsl.SetMVCCStats(ctx, eng, &ms); err != nil {
			t.Fatal(err)
		}
	}()

	// Now that we've tampered with the stats, restart the cluster and extend it
	// to three nodes.
	const numNodes = 3
	tc := testcluster.StartTestCluster(t, numNodes, clusterArgs)
	defer tc.Stopper().Stop(ctx)

	db0 := tc.Servers[0].DB()

	// Run a goroutine that writes to the range in a tight loop. This tests that
	// RecomputeStats does not see any skew in its MVCC stats when they are
	// modified concurrently. Note that these writes don't interfere with the
	// field we modified (SysCount).
	tc.Stopper().RunWorker(ctx, func(ctx context.Context) {
		// This channel terminates the loop early if the test takes more than five
		// seconds. This is useful for stress race runs in CI where the tight loop
		// can starve the actual work to be done.
		done := time.After(5 * time.Second)
		for {
			if err := db0.Put(ctx, fmt.Sprintf("%s%d", key, rand.Int63()), "ballast"); err != nil {
				t.Error(err)
			}

			select {
			case <-ctx.Done():
				return
			case <-tc.Stopper().ShouldQuiesce():
				return
			case <-done:
				return
			default:
			}
		}
	})

	var targets []roachpb.ReplicationTarget
	for i := 1; i < numNodes; i++ {
		targets = append(targets, tc.Target(i))
	}
	if _, err := tc.AddReplicas(key, targets...); err != nil {
		t.Fatal(err)
	}

	// Force a run of the consistency queue; otherwise it might take a while.
	ts := tc.Servers[0]
	store, pErr := ts.Stores().GetStore(ts.GetFirstStoreID())
	if pErr != nil {
		t.Fatal(pErr)
	}
	if err := store.ForceConsistencyQueueProcess(); err != nil {
		t.Fatal(err)
	}

	// The stats should magically repair themselves. We'll first do a quick check
	// and then a full recomputation.
	repl, _, err := ts.Stores().GetReplicaForRangeID(rangeID)
	if err != nil {
		t.Fatal(err)
	}
	ms := repl.GetMVCCStats()
	if ms.SysCount >= sysCountGarbage {
		t.Fatalf("still have a SysCount of %d", ms.SysCount)
	}

	if delta := computeDelta(db0); delta != (enginepb.MVCCStats{}) {
		t.Fatalf("stats still in need of adjustment: %+v", delta)
	}

	select {
	case resp := <-ccCh:
		assert.Contains(t, resp.Result[0].Detail, `KeyBytes`) // contains printed stats
		assert.Equal(t, roachpb.CheckConsistencyResponse_RANGE_CONSISTENT_STATS_INCORRECT, resp.Result[0].Status)
		assert.False(t, hadEstimates)
	default:
		assert.True(t, hadEstimates)
	}
}