github.com/cockroachdb/cockroach@v20.2.0-alpha.1+incompatible/pkg/kv/kvserver/helpers_test.go

// Copyright 2016 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

// This file includes test-only helper methods added to types in
// package kvserver. These methods are only linked in to tests in this
// directory (but may be used from tests in both package kvserver and
// package kvserver_test).

package kvserver

import (
	"context"
	"fmt"
	"math/rand"
	"testing"
	"time"
	"unsafe"

	circuit "github.com/cockroachdb/circuitbreaker"
	"github.com/cockroachdb/cockroach/pkg/config"
	"github.com/cockroachdb/cockroach/pkg/config/zonepb"
	"github.com/cockroachdb/cockroach/pkg/kv"
	"github.com/cockroachdb/cockroach/pkg/kv/kvserver/batcheval"
	"github.com/cockroachdb/cockroach/pkg/kv/kvserver/batcheval/result"
	"github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverpb"
	"github.com/cockroachdb/cockroach/pkg/kv/kvserver/rditer"
	"github.com/cockroachdb/cockroach/pkg/roachpb"
	"github.com/cockroachdb/cockroach/pkg/rpc"
	"github.com/cockroachdb/cockroach/pkg/storage"
	"github.com/cockroachdb/cockroach/pkg/storage/enginepb"
	"github.com/cockroachdb/cockroach/pkg/util"
	"github.com/cockroachdb/cockroach/pkg/util/hlc"
	"github.com/cockroachdb/cockroach/pkg/util/log"
	"github.com/cockroachdb/cockroach/pkg/util/quotapool"
	"github.com/cockroachdb/cockroach/pkg/util/randutil"
	"github.com/cockroachdb/cockroach/pkg/util/timeutil"
	"github.com/cockroachdb/errors"
	"go.etcd.io/etcd/raft"
)

func (s *Store) Transport() *RaftTransport {
	return s.cfg.Transport
}

func (s *Store) FindTargetAndTransferLease(
	ctx context.Context, repl *Replica, desc *roachpb.RangeDescriptor, zone *zonepb.ZoneConfig,
) (bool, error) {
	return s.replicateQueue.findTargetAndTransferLease(
		ctx, repl, desc, zone, transferLeaseOptions{},
	)
}

// AddReplica adds the replica to the store's replica map and to the sorted
// replicasByKey slice. To be used only by unittests.
func (s *Store) AddReplica(repl *Replica) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	if err := s.addReplicaInternalLocked(repl); err != nil {
		return err
	}
	s.metrics.ReplicaCount.Inc(1)
	return nil
}

// ComputeMVCCStats immediately computes correct total MVCC usage statistics
// for the store, returning the computed values (but without modifying the
// store).
func (s *Store) ComputeMVCCStats() (enginepb.MVCCStats, error) {
	var totalStats enginepb.MVCCStats
	var err error

	now := s.Clock().PhysicalNow()
	newStoreReplicaVisitor(s).Visit(func(r *Replica) bool {
		var stats enginepb.MVCCStats
		stats, err = rditer.ComputeStatsForRange(r.Desc(), s.Engine(), now)
		if err != nil {
			return false
		}
		totalStats.Add(stats)
		return true
	})
	return totalStats, err
}
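
// A hypothetical usage sketch (not part of the upstream file): a test can
// recompute the store's MVCC stats before and after a write to check that the
// write is reflected on disk. The store variable and the write itself are
// assumed to come from the surrounding test.
//
//	before, err := store.ComputeMVCCStats()
//	if err != nil {
//		t.Fatal(err)
//	}
//	// ... perform a write through the store's test sender ...
//	after, err := store.ComputeMVCCStats()
//	if err != nil {
//		t.Fatal(err)
//	}
//	if after.KeyCount <= before.KeyCount {
//		t.Fatalf("expected KeyCount to grow: before %d, after %d", before.KeyCount, after.KeyCount)
//	}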

// ConsistencyQueueShouldQueue invokes the shouldQueue method on the
// store's consistency queue.
func (s *Store) ConsistencyQueueShouldQueue(
	ctx context.Context, now hlc.Timestamp, r *Replica, cfg *config.SystemConfig,
) (bool, float64) {
	return s.consistencyQueue.shouldQueue(ctx, now, r, cfg)
}

// LogReplicaChangeTest adds a fake replica change event to the range log for
// the range described by the given descriptor.
func (s *Store) LogReplicaChangeTest(
	ctx context.Context,
	txn *kv.Txn,
	changeType roachpb.ReplicaChangeType,
	replica roachpb.ReplicaDescriptor,
	desc roachpb.RangeDescriptor,
	reason kvserverpb.RangeLogEventReason,
	details string,
) error {
	return s.logChange(ctx, txn, changeType, replica, desc, reason, details)
}

// ReplicateQueuePurgatoryLength returns the number of replicas in replicate
// queue purgatory.
func (s *Store) ReplicateQueuePurgatoryLength() int {
	return s.replicateQueue.PurgatoryLength()
}

// SplitQueuePurgatoryLength returns the number of replicas in split
// queue purgatory.
func (s *Store) SplitQueuePurgatoryLength() int {
	return s.splitQueue.PurgatoryLength()
}

// SetRaftLogQueueActive enables or disables the raft log queue.
func (s *Store) SetRaftLogQueueActive(active bool) {
	s.setRaftLogQueueActive(active)
}

// SetReplicaGCQueueActive enables or disables the replica GC queue.
func (s *Store) SetReplicaGCQueueActive(active bool) {
	s.setReplicaGCQueueActive(active)
}

// SetSplitQueueActive enables or disables the split queue.
func (s *Store) SetSplitQueueActive(active bool) {
	s.setSplitQueueActive(active)
}

// SetMergeQueueActive enables or disables the merge queue.
func (s *Store) SetMergeQueueActive(active bool) {
	s.setMergeQueueActive(active)
}

// SetRaftSnapshotQueueActive enables or disables the raft snapshot queue.
func (s *Store) SetRaftSnapshotQueueActive(active bool) {
	s.setRaftSnapshotQueueActive(active)
}

// SetReplicaScannerActive enables or disables the scanner. Note that while
// inactive, removals are still processed.
func (s *Store) SetReplicaScannerActive(active bool) {
	s.setScannerActive(active)
}
   156  
   157  // EnqueueRaftUpdateCheck enqueues the replica for a Raft update check, forcing
   158  // the replica's Raft group into existence.
   159  func (s *Store) EnqueueRaftUpdateCheck(rangeID roachpb.RangeID) {
   160  	s.enqueueRaftUpdateCheck(rangeID)
   161  }
   162  
   163  func manualQueue(s *Store, q queueImpl, repl *Replica) error {
   164  	cfg := s.Gossip().GetSystemConfig()
   165  	if cfg == nil {
   166  		return fmt.Errorf("%s: system config not yet available", s)
   167  	}
   168  	ctx := repl.AnnotateCtx(context.Background())
   169  	return q.process(ctx, repl, cfg)
   170  }
   171  
   172  // ManualGC processes the specified replica using the store's GC queue.
   173  func (s *Store) ManualGC(repl *Replica) error {
   174  	return manualQueue(s, s.gcQueue, repl)
   175  }
   176  
   177  // ManualReplicaGC processes the specified replica using the store's replica
   178  // GC queue.
   179  func (s *Store) ManualReplicaGC(repl *Replica) error {
   180  	return manualQueue(s, s.replicaGCQueue, repl)
   181  }
   182  
   183  // ManualRaftSnapshot will manually send a raft snapshot to the target replica.
   184  func (s *Store) ManualRaftSnapshot(repl *Replica, target roachpb.ReplicaID) error {
   185  	return s.raftSnapshotQueue.processRaftSnapshot(context.Background(), repl, target)
   186  }
   187  
   188  func (s *Store) ReservationCount() int {
   189  	return len(s.snapshotApplySem)
   190  }
   191  
   192  // ClearClosedTimestampStorage clears the closed timestamp storage of all
   193  // knowledge about closed timestamps.
   194  func (s *Store) ClearClosedTimestampStorage() {
   195  	s.cfg.ClosedTimestamp.Storage.Clear()
   196  }
   197  
   198  // RequestClosedTimestamp instructs the closed timestamp client to request the
   199  // relevant node to publish its MLAI for the provided range.
   200  func (s *Store) RequestClosedTimestamp(nodeID roachpb.NodeID, rangeID roachpb.RangeID) {
   201  	s.cfg.ClosedTimestamp.Clients.Request(nodeID, rangeID)
   202  }
   203  
   204  // AssertInvariants verifies that the store's bookkeping is self-consistent. It
   205  // is only valid to call this method when there is no in-flight traffic to the
   206  // store (e.g., after the store is shut down).
   207  func (s *Store) AssertInvariants() {
   208  	s.mu.RLock()
   209  	defer s.mu.RUnlock()
   210  	s.mu.replicas.Range(func(_ int64, p unsafe.Pointer) bool {
   211  		ctx := s.cfg.AmbientCtx.AnnotateCtx(context.Background())
   212  		repl := (*Replica)(p)
   213  		// We would normally need to hold repl.raftMu. Otherwise we can observe an
   214  		// initialized replica that is not in s.replicasByKey, e.g., if we race with
   215  		// a goroutine that is currently initializing repl. The lock ordering makes
   216  		// acquiring repl.raftMu challenging; instead we require that this method is
   217  		// called only when there is no in-flight traffic to the store, at which
   218  		// point acquiring repl.raftMu is unnecessary.
   219  		if repl.IsInitialized() {
   220  			if ex := s.mu.replicasByKey.Get(repl); ex != repl {
   221  				log.Fatalf(ctx, "%v misplaced in replicasByKey; found %v instead", repl, ex)
   222  			}
   223  		} else if _, ok := s.mu.uninitReplicas[repl.RangeID]; !ok {
   224  			log.Fatalf(ctx, "%v missing from uninitReplicas", repl)
   225  		}
   226  		return true // keep iterating
   227  	})
   228  }
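
// A hypothetical call pattern (not part of the upstream file) for
// AssertInvariants: stop the store's stopper so that no traffic is in flight,
// then check the bookkeeping. The ctx and store variables are assumed to come
// from the surrounding test.
//
//	store.Stopper().Stop(ctx)
//	store.AssertInvariants()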

// NewTestStorePool creates a store pool suitable for tests, with the
// time-until-store-dead check disabled and a node liveness function that
// always reports nodes as live.
func NewTestStorePool(cfg StoreConfig) *StorePool {
	TimeUntilStoreDead.Override(&cfg.Settings.SV, TestTimeUntilStoreDeadOff)
	return NewStorePool(
		cfg.AmbientCtx,
		cfg.Settings,
		cfg.Gossip,
		cfg.Clock,
		// NodeCountFunc
		func() int {
			return 1
		},
		func(roachpb.NodeID, time.Time, time.Duration) kvserverpb.NodeLivenessStatus {
			return kvserverpb.NodeLivenessStatus_LIVE
		},
		/* deterministic */ false,
	)
}

// AssertState verifies that the replica's in-memory state is consistent with
// the state persisted in the given reader.
func (r *Replica) AssertState(ctx context.Context, reader storage.Reader) {
	r.raftMu.Lock()
	defer r.raftMu.Unlock()
	r.mu.Lock()
	defer r.mu.Unlock()
	r.assertStateLocked(ctx, reader)
}

// RaftLock acquires the replica's raftMu.
func (r *Replica) RaftLock() {
	r.raftMu.Lock()
}

// RaftUnlock releases the replica's raftMu.
func (r *Replica) RaftUnlock() {
	r.raftMu.Unlock()
}

// GetLastIndex is the same as LastIndex but does not require the caller to
// hold the replica lock.
func (r *Replica) GetLastIndex() (uint64, error) {
	r.mu.Lock()
	defer r.mu.Unlock()
	return r.raftLastIndexLocked()
}

// LastAssignedLeaseIndex returns the last lease index assigned by the
// replica's proposal buffer.
func (r *Replica) LastAssignedLeaseIndex() uint64 {
	r.mu.RLock()
	defer r.mu.RUnlock()
	return r.mu.proposalBuf.LastAssignedLeaseIndexRLocked()
}

// MaxClosed returns the maximum closed timestamp known to the Replica.
func (r *Replica) MaxClosed(ctx context.Context) (_ hlc.Timestamp, ok bool) {
	return r.maxClosed(ctx)
}

// InitQuotaPool initializes the replica's quota pool with the given quota and
// resets its quota release queue. Only safe to call on the replica that is
// both lease holder and raft leader while holding the raftMu.
func (r *Replica) InitQuotaPool(quota uint64) error {
	r.mu.Lock()
	defer r.mu.Unlock()
	var appliedIndex uint64
	err := r.withRaftGroupLocked(false, func(rawNode *raft.RawNode) (unquiesceAndWakeLeader bool, err error) {
		appliedIndex = rawNode.BasicStatus().Applied
		return false, nil
	})
	if err != nil {
		return err
	}

	r.mu.proposalQuotaBaseIndex = appliedIndex
	if r.mu.proposalQuota != nil {
		r.mu.proposalQuota.Close("re-creating")
	}
	r.mu.proposalQuota = quotapool.NewIntPool(r.rangeStr.String(), quota)
	r.mu.quotaReleaseQueue = nil
	return nil
}

// QuotaAvailable returns the quota available in the replica's quota pool. Only
// safe to call on the replica that is both lease holder and raft leader.
func (r *Replica) QuotaAvailable() uint64 {
	r.mu.Lock()
	defer r.mu.Unlock()
	return r.mu.proposalQuota.ApproximateQuota()
}
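
// A hypothetical sketch (not from the upstream file) combining the quota pool
// helpers above: initialize the pool while holding raftMu, then confirm the
// full quota is available. It assumes repl is a *Replica that is both lease
// holder and raft leader, as InitQuotaPool requires.
//
//	const quota = 1 << 20
//	repl.RaftLock()
//	err := repl.InitQuotaPool(quota)
//	repl.RaftUnlock()
//	if err != nil {
//		t.Fatal(err)
//	}
//	if q := repl.QuotaAvailable(); q != quota {
//		t.Fatalf("expected %d quota available, got %d", quota, q)
//	}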

// GetProposalQuota returns the Replica's internal proposal quota.
// It is not safe for concurrent use, so ensure that the Replica is no longer
// active.
func (r *Replica) GetProposalQuota() *quotapool.IntPool {
	r.mu.Lock()
	defer r.mu.Unlock()
	return r.mu.proposalQuota
}

// QuotaReleaseQueueLen returns the length of the replica's quota release
// queue.
func (r *Replica) QuotaReleaseQueueLen() int {
	r.mu.Lock()
	defer r.mu.Unlock()
	return len(r.mu.quotaReleaseQueue)
}

// IsFollowerActiveSince returns whether the given follower has been active
// within the given threshold, according to the replica's last-update times.
func (r *Replica) IsFollowerActiveSince(
	ctx context.Context, followerID roachpb.ReplicaID, threshold time.Duration,
) bool {
	r.mu.RLock()
	defer r.mu.RUnlock()
	return r.mu.lastUpdateTimes.isFollowerActiveSince(ctx, followerID, timeutil.Now(), threshold)
}

// GetTSCacheHighWater returns the high water mark of the replica's timestamp
// cache.
func (r *Replica) GetTSCacheHighWater() hlc.Timestamp {
	start := roachpb.Key(r.Desc().StartKey)
	end := roachpb.Key(r.Desc().EndKey)
	t, _ := r.store.tsCache.GetMax(start, end)
	return t
}

// ShouldBackpressureWrites returns whether writes to the range should be
// subject to backpressure.
func (r *Replica) ShouldBackpressureWrites() bool {
	return r.shouldBackpressureWrites()
}

// GetRaftLogSize returns the approximate raft log size and whether it is
// trustworthy. See r.mu.raftLogSize for details.
func (r *Replica) GetRaftLogSize() (int64, bool) {
	r.mu.RLock()
	defer r.mu.RUnlock()
	return r.mu.raftLogSize, r.mu.raftLogSizeTrusted
}

// GetCachedLastTerm returns the cached last term value. May return
// invalidLastTerm if the cache is not set.
func (r *Replica) GetCachedLastTerm() uint64 {
	r.mu.RLock()
	defer r.mu.RUnlock()
	return r.mu.lastTerm
}

func (r *Replica) IsRaftGroupInitialized() bool {
	r.mu.RLock()
	defer r.mu.RUnlock()
	return r.mu.internalRaftGroup != nil
}

// GetStoreList exposes getStoreList for testing only, but with a hardcoded
// storeFilter of storeFilterNone.
func (sp *StorePool) GetStoreList() (StoreList, int, int) {
	list, available, throttled := sp.getStoreList(storeFilterNone)
	return list, available, len(throttled)
}

// Stores returns a copy of sl.stores.
func (sl *StoreList) Stores() []roachpb.StoreDescriptor {
	stores := make([]roachpb.StoreDescriptor, len(sl.stores))
	copy(stores, sl.stores)
	return stores
}

// SideloadedRaftMuLocked returns r.raftMu.sideloaded. Requires a previous call
// to RaftLock() or some other guarantee that r.raftMu is held.
func (r *Replica) SideloadedRaftMuLocked() SideloadStorage {
	return r.raftMu.sideloaded
}

// LargestPreviousMaxRangeSizeBytes returns the in-memory value used to mitigate
// backpressure when zone.RangeMaxBytes is decreased.
func (r *Replica) LargestPreviousMaxRangeSizeBytes() int64 {
	r.mu.RLock()
	defer r.mu.RUnlock()
	return r.mu.largestPreviousMaxRangeSizeBytes
}

// MakeSSTable builds an SSTable containing a single key/value pair at the
// given timestamp and returns the raw SSTable bytes along with the
// MVCCKeyValue that was written.
func MakeSSTable(key, value string, ts hlc.Timestamp) ([]byte, storage.MVCCKeyValue) {
	sstFile := &storage.MemFile{}
	sst := storage.MakeIngestionSSTWriter(sstFile)
	defer sst.Close()

	v := roachpb.MakeValueFromBytes([]byte(value))
	v.InitChecksum([]byte(key))

	kv := storage.MVCCKeyValue{
		Key: storage.MVCCKey{
			Key:       []byte(key),
			Timestamp: ts,
		},
		Value: v.RawBytes,
	}

	if err := sst.Put(kv.Key, kv.Value); err != nil {
		panic(errors.Wrap(err, "while writing to SSTable"))
	}
	if err := sst.Finish(); err != nil {
		panic(errors.Wrap(err, "while finishing SSTable"))
	}
	return sstFile.Data(), kv
}

// ProposeAddSSTable builds a single-entry SSTable via MakeSSTable and proposes
// an AddSSTable request containing it to the range that holds the key.
func ProposeAddSSTable(ctx context.Context, key, val string, ts hlc.Timestamp, store *Store) error {
	var ba roachpb.BatchRequest
	ba.RangeID = store.LookupReplica(roachpb.RKey(key)).RangeID

	var addReq roachpb.AddSSTableRequest
	addReq.Data, _ = MakeSSTable(key, val, ts)
	addReq.Key = roachpb.Key(key)
	addReq.EndKey = addReq.Key.Next()
	ba.Add(&addReq)

	_, pErr := store.Send(ctx, ba)
	if pErr != nil {
		return pErr.GoError()
	}
	return nil
}

// SetMockAddSSTable replaces the registered AddSSTable command with a
// test-only mock and returns a function that restores the original
// implementation.
func SetMockAddSSTable() (undo func()) {
	prev, _ := batcheval.LookupCommand(roachpb.AddSSTable)

	// TODO(tschottdorf): this already does nontrivial work. Worth open-sourcing the relevant
	// subparts of the real evalAddSSTable to make this test less likely to rot.
	evalAddSSTable := func(
		ctx context.Context, _ storage.ReadWriter, cArgs batcheval.CommandArgs, _ roachpb.Response,
	) (result.Result, error) {
		log.Event(ctx, "evaluated testing-only AddSSTable mock")
		args := cArgs.Args.(*roachpb.AddSSTableRequest)

		return result.Result{
			Replicated: kvserverpb.ReplicatedEvalResult{
				AddSSTable: &kvserverpb.ReplicatedEvalResult_AddSSTable{
					Data:  args.Data,
					CRC32: util.CRC32(args.Data),
				},
			},
		}, nil
	}

	batcheval.UnregisterCommand(roachpb.AddSSTable)
	batcheval.RegisterReadWriteCommand(roachpb.AddSSTable, batcheval.DefaultDeclareKeys, evalAddSSTable)
	return func() {
		batcheval.UnregisterCommand(roachpb.AddSSTable)
		batcheval.RegisterReadWriteCommand(roachpb.AddSSTable, prev.DeclareKeys, prev.EvalRW)
	}
}
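
// A hypothetical usage sketch (not part of the upstream file): install the
// mock for the duration of a test and propose an SSTable through it. The ctx,
// store, and ts variables are assumed to come from the surrounding test.
//
//	defer SetMockAddSSTable()()
//	if err := ProposeAddSSTable(ctx, "key", "value", ts, store); err != nil {
//		t.Fatal(err)
//	}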

// IsQuiescent returns whether the replica is quiescent.
func (r *Replica) IsQuiescent() bool {
	r.mu.Lock()
	defer r.mu.Unlock()
	return r.mu.quiescent
}

// GetQueueLastProcessed returns the last processed timestamp for the
// specified queue, or the zero timestamp if not available.
func (r *Replica) GetQueueLastProcessed(ctx context.Context, queue string) (hlc.Timestamp, error) {
	return r.getQueueLastProcessed(ctx, queue)
}

// UnquiesceAndWakeLeader unquiesces the replica and wakes its raft leader.
func (r *Replica) UnquiesceAndWakeLeader() {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.unquiesceAndWakeLeaderLocked()
}

// ReadProtectedTimestamps reads the replica's protected timestamp state and
// refreshes the replica's cached view of it.
func (r *Replica) ReadProtectedTimestamps(ctx context.Context) {
	var ts cachedProtectedTimestampState
	defer r.maybeUpdateCachedProtectedTS(&ts)
	r.mu.RLock()
	defer r.mu.RUnlock()
	ts = r.readProtectedTimestampsRLocked(ctx, nil /* f */)
}

// SetDrainingInternal is a test-only wrapper around setDrainingInternal.
func (nl *NodeLiveness) SetDrainingInternal(
	ctx context.Context, liveness kvserverpb.Liveness, drain bool,
) error {
	return nl.setDrainingInternal(ctx, liveness, drain, nil /* reporter */)
}

// SetDecommissioningInternal is a test-only wrapper around
// setDecommissioningInternal.
func (nl *NodeLiveness) SetDecommissioningInternal(
	ctx context.Context, nodeID roachpb.NodeID, liveness kvserverpb.Liveness, decommission bool,
) (changeCommitted bool, err error) {
	return nl.setDecommissioningInternal(ctx, nodeID, liveness, decommission)
}

// GetCircuitBreaker returns the circuit breaker controlling
// connection attempts to the specified node.
func (t *RaftTransport) GetCircuitBreaker(
	nodeID roachpb.NodeID, class rpc.ConnectionClass,
) *circuit.Breaker {
	return t.dialer.GetCircuitBreaker(nodeID, class)
}

// WriteRandomDataToRange writes 100 random key/value pairs under the given
// key prefix to the specified range and returns a key that falls roughly in
// the middle of the written keyspace.
func WriteRandomDataToRange(
	t testing.TB, store *Store, rangeID roachpb.RangeID, keyPrefix []byte,
) (midpoint []byte) {
	src := rand.New(rand.NewSource(0))
	for i := 0; i < 100; i++ {
		key := append([]byte(nil), keyPrefix...)
		key = append(key, randutil.RandBytes(src, int(src.Int31n(1<<7)))...)
		val := randutil.RandBytes(src, int(src.Int31n(1<<8)))
		pArgs := putArgs(key, val)
		if _, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{
			RangeID: rangeID,
		}, &pArgs); pErr != nil {
			t.Fatal(pErr)
		}
	}
	// Return approximate midway point ("Z" in string "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz").
	midKey := append([]byte(nil), keyPrefix...)
	midKey = append(midKey, []byte("Z")...)
	return midKey
}
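
// A hypothetical sketch (not from the upstream file): populate a range with
// random data and then split it near the returned midpoint. It assumes an
// adminSplitArgs helper in the calling test package that builds an
// AdminSplitRequest for the given split key.
//
//	midKey := WriteRandomDataToRange(t, store, repl.RangeID, keyPrefix)
//	splitArgs := adminSplitArgs(midKey)
//	if _, pErr := kv.SendWrapped(ctx, store.TestSender(), splitArgs); pErr != nil {
//		t.Fatal(pErr)
//	}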

// WatchForDisappearingReplicas loops until the store's stopper quiesces,
// failing the test if any replica previously observed in the store's replica
// map is no longer present.
func WatchForDisappearingReplicas(t testing.TB, store *Store) {
	m := make(map[int64]struct{})
	for {
		select {
		case <-store.Stopper().ShouldQuiesce():
			return
		default:
		}

		store.mu.replicas.Range(func(k int64, v unsafe.Pointer) bool {
			m[k] = struct{}{}
			return true
		})

		for k := range m {
			if _, ok := store.mu.replicas.Load(k); !ok {
				t.Fatalf("r%d disappeared from Store.mu.replicas map", k)
			}
		}
	}
}