github.com/matrixorigin/matrixone@v0.7.0/pkg/logservice/store_test.go

// Copyright 2021 - 2022 Matrix Origin
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package logservice

import (
	"context"
	"math"
	"sync/atomic"
	"testing"
	"time"

	"github.com/google/uuid"
	"github.com/lni/dragonboat/v4"
	"github.com/lni/goutils/leaktest"
	"github.com/lni/vfs"
	"github.com/matrixorigin/matrixone/pkg/common/moerr"
	"github.com/matrixorigin/matrixone/pkg/common/runtime"
	"github.com/matrixorigin/matrixone/pkg/hakeeper"
	"github.com/matrixorigin/matrixone/pkg/logutil"
	pb "github.com/matrixorigin/matrixone/pkg/pb/logservice"
	"github.com/matrixorigin/matrixone/pkg/pb/metadata"
	"github.com/matrixorigin/matrixone/pkg/taskservice"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

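// TestMain installs a console logger and a process-level runtime before
// running the tests in this package.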
func TestMain(m *testing.M) {
	logutil.SetupMOLogger(&logutil.LogConfig{
		Level:  "debug",
		Format: "console",
	})

	runtime.SetupProcessLevelRuntime(runtime.NewRuntime(metadata.ServiceType_LOG, "test", logutil.GetGlobalLogger()))
	m.Run()
}

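// testIOTimeout bounds each store I/O request issued by these tests.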
var testIOTimeout = 5 * time.Second

func TestNodeHostConfig(t *testing.T) {
	cfg := Config{
		DeploymentID: 1234,
		DataDir:      "lalala",
	}
	cfg.Fill()
	nhConfig := getNodeHostConfig(cfg)
	assert.Equal(t, cfg.DeploymentID, nhConfig.DeploymentID)
	assert.Equal(t, cfg.DataDir, nhConfig.NodeHostDir)
	assert.True(t, nhConfig.AddressByNodeHostID)
}

func TestRaftConfig(t *testing.T) {
	cfg := getRaftConfig(1, 1)
	assert.True(t, cfg.CheckQuorum)
	assert.True(t, cfg.OrderedConfigChange)
}

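// getStoreTestConfig returns a Config for a self-contained single-store test:
// a strict in-memory filesystem, a short RTT, and a tee LogDB.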
func getStoreTestConfig() Config {
	cfg := Config{
		UUID:                uuid.New().String(),
		RTTMillisecond:      10,
		GossipAddress:       testGossipAddress,
		GossipSeedAddresses: []string{testGossipAddress, dummyGossipSeedAddress},
		DeploymentID:        1,
		FS:                  vfs.NewStrictMem(),
		UseTeeLogDB:         true,
	}
	cfg.Fill()
	return cfg
}

func TestStoreCanBeCreatedAndClosed(t *testing.T) {
	defer leaktest.AfterTest(t)()
	cfg := getStoreTestConfig()
	defer vfs.ReportLeakedFD(cfg.FS, t)
	store, err := newLogStore(cfg, nil, runtime.DefaultRuntime())
	assert.NoError(t, err)
	defer func() {
		assert.NoError(t, store.close())
	}()
}

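// getTestStore creates a log store and, when startLogReplica is set, starts
// replica 2 of log shard 1 on it.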
func getTestStore(cfg Config, startLogReplica bool, taskService taskservice.TaskService) (*store, error) {
	store, err := newLogStore(cfg, func() taskservice.TaskService { return taskService }, runtime.DefaultRuntime())
	if err != nil {
		return nil, err
	}
	if startLogReplica {
		peers := make(map[uint64]dragonboat.Target)
		peers[2] = store.nh.ID()
		if err := store.startReplica(1, 2, peers, false); err != nil {
			store.close()
			return nil, err
		}
	}
	return store, nil
}

func TestHAKeeperCanBeStarted(t *testing.T) {
	defer leaktest.AfterTest(t)()
	cfg := getStoreTestConfig()
	defer vfs.ReportLeakedFD(cfg.FS, t)
	store, err := newLogStore(cfg, nil, runtime.DefaultRuntime())
	assert.NoError(t, err)
	peers := make(map[uint64]dragonboat.Target)
	peers[2] = store.nh.ID()
	assert.NoError(t, store.startHAKeeperReplica(2, peers, false))
	defer func() {
		assert.NoError(t, store.close())
	}()
	mustHaveReplica(t, store, hakeeper.DefaultHAKeeperShardID, 2)
}

func TestStateMachineCanBeStarted(t *testing.T) {
	defer leaktest.AfterTest(t)()
	cfg := getStoreTestConfig()
	defer vfs.ReportLeakedFD(cfg.FS, t)
	store, err := getTestStore(cfg, true, nil)
	assert.NoError(t, err)
	defer func() {
		assert.NoError(t, store.close())
	}()
	mustHaveReplica(t, store, 1, 2)
}

func TestReplicaCanBeStopped(t *testing.T) {
	defer leaktest.AfterTest(t)()
	cfg := getStoreTestConfig()
	defer vfs.ReportLeakedFD(cfg.FS, t)
	store, err := getTestStore(cfg, true, nil)
	assert.NoError(t, err)
	defer func() {
		assert.NoError(t, store.close())
	}()
	mustHaveReplica(t, store, 1, 2)
	require.NoError(t, store.stopReplica(1, 2))
	assert.False(t, hasReplica(store, 1, 2))
}

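// runStoreTest runs fn against a fresh store that already hosts replica 2 of
// log shard 1, closing the store when fn returns.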
func runStoreTest(t *testing.T, fn func(*testing.T, *store)) {
	defer leaktest.AfterTest(t)()
	cfg := getStoreTestConfig()
	defer vfs.ReportLeakedFD(cfg.FS, t)
	store, err := getTestStore(cfg, true, nil)
	assert.NoError(t, err)
	defer func() {
		assert.NoError(t, store.close())
	}()
	fn(t, store)
}

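// getTestUserEntry builds a user entry: a UserEntryUpdate header followed by
// the lease holder ID (100) and an 8-byte payload.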
func getTestUserEntry() []byte {
	cmd := make([]byte, headerSize+8+8)
	binaryEnc.PutUint32(cmd, uint32(pb.UserEntryUpdate))
	binaryEnc.PutUint64(cmd[headerSize:], 100)
	binaryEnc.PutUint64(cmd[headerSize+8:], 1234567890)
	return cmd
}

func TestGetOrExtendLease(t *testing.T) {
	fn := func(t *testing.T, store *store) {
		ctx, cancel := context.WithTimeout(context.Background(), testIOTimeout)
		defer cancel()
		assert.NoError(t, store.getOrExtendDNLease(ctx, 1, 100))
	}
	runStoreTest(t, fn)
}

func TestAppendLog(t *testing.T) {
	fn := func(t *testing.T, store *store) {
		ctx, cancel := context.WithTimeout(context.Background(), testIOTimeout)
		defer cancel()
		assert.NoError(t, store.getOrExtendDNLease(ctx, 1, 100))
		cmd := getTestUserEntry()
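		// earlier lsns are consumed during bootstrap (the lease update sits
		// at lsn 3, see TestQueryLog), so the first user entry lands at lsn 4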
		lsn, err := store.append(ctx, 1, cmd)
		assert.NoError(t, err)
		assert.Equal(t, uint64(4), lsn)
	}
	runStoreTest(t, fn)
}

func TestAppendLogIsRejectedForMismatchedLeaseHolderID(t *testing.T) {
	fn := func(t *testing.T, store *store) {
		ctx, cancel := context.WithTimeout(context.Background(), testIOTimeout)
		defer cancel()
		assert.NoError(t, store.getOrExtendDNLease(ctx, 1, 100))
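		// the lease is held by 100; an entry carrying lease holder ID 101
		// must be rejected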
		cmd := make([]byte, headerSize+8+8)
		binaryEnc.PutUint32(cmd, uint32(pb.UserEntryUpdate))
		binaryEnc.PutUint64(cmd[headerSize:], 101)
		binaryEnc.PutUint64(cmd[headerSize+8:], 1234567890)
		_, err := store.append(ctx, 1, cmd)
		assert.True(t, moerr.IsMoErrCode(err, moerr.ErrNotLeaseHolder))
	}
	runStoreTest(t, fn)
}

func TestStoreTsoUpdate(t *testing.T) {
	fn := func(t *testing.T, store *store) {
		ctx, cancel := context.WithTimeout(context.Background(), testIOTimeout)
		defer cancel()
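		// tsoUpdate(n) reserves n timestamps and returns the first of the range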
		v1, err := store.tsoUpdate(ctx, 100)
		require.NoError(t, err)
		assert.Equal(t, uint64(1), v1)
		v2, err := store.tsoUpdate(ctx, 1000)
		require.NoError(t, err)
		assert.Equal(t, uint64(101), v2)
	}
	runStoreTest(t, fn)
}

func TestTruncateLog(t *testing.T) {
	fn := func(t *testing.T, store *store) {
		ctx, cancel := context.WithTimeout(context.Background(), testIOTimeout)
		defer cancel()
		assert.NoError(t, store.getOrExtendDNLease(ctx, 1, 100))
		cmd := getTestUserEntry()
		_, err := store.append(ctx, 1, cmd)
		assert.NoError(t, err)
		assert.NoError(t, store.truncateLog(ctx, 1, 4))
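		// moving the truncation point backwards is invalid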
		err = store.truncateLog(ctx, 1, 3)
		assert.True(t, moerr.IsMoErrCode(err, moerr.ErrInvalidTruncateLsn))
	}
	runStoreTest(t, fn)
}

func TestGetTruncatedIndex(t *testing.T) {
	fn := func(t *testing.T, store *store) {
		ctx, cancel := context.WithTimeout(context.Background(), testIOTimeout)
		defer cancel()
		index, err := store.getTruncatedLsn(ctx, 1)
		assert.Equal(t, uint64(0), index)
		assert.NoError(t, err)
		assert.NoError(t, store.getOrExtendDNLease(ctx, 1, 100))
		cmd := getTestUserEntry()
		_, err = store.append(ctx, 1, cmd)
		assert.NoError(t, err)
		assert.NoError(t, store.truncateLog(ctx, 1, 4))
		index, err = store.getTruncatedLsn(ctx, 1)
		assert.Equal(t, uint64(4), index)
		assert.NoError(t, err)
	}
	runStoreTest(t, fn)
}

func TestQueryLog(t *testing.T) {
	fn := func(t *testing.T, store *store) {
		ctx, cancel := context.WithTimeout(context.Background(), testIOTimeout)
		defer cancel()
		assert.NoError(t, store.getOrExtendDNLease(ctx, 1, 100))
		cmd := getTestUserEntry()
		_, err := store.append(ctx, 1, cmd)
		assert.NoError(t, err)
		entries, lsn, err := store.queryLog(ctx, 1, 4, math.MaxUint64)
		assert.NoError(t, err)
		assert.Equal(t, 1, len(entries))
		assert.Equal(t, uint64(4), lsn)
		assert.Equal(t, entries[0].Data, cmd)
		// leaseholder ID update cmd at entry index 3
		entries, lsn, err = store.queryLog(ctx, 1, 3, math.MaxUint64)
		assert.NoError(t, err)
		assert.Equal(t, 2, len(entries))
		assert.Equal(t, uint64(3), lsn)
		assert.Equal(t, cmd, entries[1].Data)
		assert.Equal(t, pb.LeaseUpdate, entries[0].Type)
		assert.Equal(t, pb.UserRecord, entries[1].Type)

		// size limited
		_, err = store.append(ctx, 1, cmd)
		assert.NoError(t, err)
		entries, lsn, err = store.queryLog(ctx, 1, 4, 1)
		assert.NoError(t, err)
		assert.Equal(t, 1, len(entries))
		assert.Equal(t, uint64(5), lsn)
		assert.Equal(t, entries[0].Data, cmd)
		// more log available
		entries, lsn, err = store.queryLog(ctx, 1, 5, 1)
		assert.NoError(t, err)
		assert.Equal(t, 1, len(entries))
		assert.Equal(t, uint64(5), lsn)
		assert.Equal(t, entries[0].Data, cmd)
	}
	runStoreTest(t, fn)
}

func TestHAKeeperTick(t *testing.T) {
	fn := func(t *testing.T, store *store) {
		peers := make(map[uint64]dragonboat.Target)
		peers[1] = store.id()
		assert.NoError(t, store.startHAKeeperReplica(1, peers, false))
		store.hakeeperTick()
	}
	runStoreTest(t, fn)
}

func TestAddScheduleCommands(t *testing.T) {
	fn := func(t *testing.T, store *store) {
		peers := make(map[uint64]dragonboat.Target)
		peers[1] = store.id()
		assert.NoError(t, store.startHAKeeperReplica(1, peers, false))
		ctx, cancel := context.WithTimeout(context.Background(), time.Second)
		defer cancel()
		sc1 := pb.ScheduleCommand{
			UUID: "uuid1",
			ConfigChange: &pb.ConfigChange{
				Replica: pb.Replica{
					ShardID: 1,
				},
			},
		}
		sc2 := pb.ScheduleCommand{
			UUID: "uuid2",
			ConfigChange: &pb.ConfigChange{
				Replica: pb.Replica{
					ShardID: 2,
				},
			},
		}
		sc3 := pb.ScheduleCommand{
			UUID: "uuid1",
			ConfigChange: &pb.ConfigChange{
				Replica: pb.Replica{
					ShardID: 3,
				},
			},
		}
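		// sc1 and sc3 share "uuid1", so getCommandBatch returns them together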
		require.NoError(t,
			store.addScheduleCommands(ctx, 1, []pb.ScheduleCommand{sc1, sc2, sc3}))
		cb, err := store.getCommandBatch(ctx, "uuid1")
		require.NoError(t, err)
		assert.Equal(t, []pb.ScheduleCommand{sc1, sc3}, cb.Commands)
		cb, err = store.getCommandBatch(ctx, "uuid2")
		require.NoError(t, err)
		assert.Equal(t, []pb.ScheduleCommand{sc2}, cb.Commands)
	}
	runStoreTest(t, fn)
}

func TestGetHeartbeatMessage(t *testing.T) {
	fn := func(t *testing.T, store *store) {
		peers := make(map[uint64]dragonboat.Target)
		peers[1] = store.id()
		assert.NoError(t, store.startReplica(10, 1, peers, false))
		assert.NoError(t, store.startHAKeeperReplica(1, peers, false))

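		// the store now hosts three replicas: log shard 1 (started by
		// runStoreTest), shard 10, and the HAKeeper shard; wait until the
		// heartbeat reports all of them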
		for i := 0; i < 5000; i++ {
			m := store.getHeartbeatMessage()
			if len(m.Replicas) != 3 {
				time.Sleep(time.Millisecond)
			} else {
				return
			}
		}
		t.Fatalf("failed to get all replicas details from heartbeat message")
	}
	runStoreTest(t, fn)
}

func TestAddHeartbeat(t *testing.T) {
	fn := func(t *testing.T, store *store) {
		peers := make(map[uint64]dragonboat.Target)
		peers[1] = store.id()
		assert.NoError(t, store.startHAKeeperReplica(1, peers, false))

		m := store.getHeartbeatMessage()
		ctx, cancel := context.WithTimeout(context.Background(), time.Second)
		defer cancel()
		_, err := store.addLogStoreHeartbeat(ctx, m)
		assert.NoError(t, err)

		cnMsg := pb.CNStoreHeartbeat{
			UUID: store.id(),
		}
		_, err = store.addCNStoreHeartbeat(ctx, cnMsg)
		assert.NoError(t, err)

		dnMsg := pb.DNStoreHeartbeat{
			UUID:   store.id(),
			Shards: make([]pb.DNShardInfo, 0),
		}
		dnMsg.Shards = append(dnMsg.Shards, pb.DNShardInfo{ShardID: 2, ReplicaID: 3})
		_, err = store.addDNStoreHeartbeat(ctx, dnMsg)
		assert.NoError(t, err)
	}
	runStoreTest(t, fn)
}

func TestAddReplicaRejectedForInvalidCCI(t *testing.T) {
	fn := func(t *testing.T, store *store) {
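		// with OrderedConfigChange enabled, a zero ConfigChangeID is stale
		// and the membership change is rejected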
		err := store.addReplica(1, 100, uuid.New().String(), 0)
		assert.Equal(t, dragonboat.ErrRejected, err)
	}
	runStoreTest(t, fn)
}

func TestAddReplica(t *testing.T) {
	fn := func(t *testing.T, store *store) {
		for {
			_, _, ok, err := store.nh.GetLeaderID(1)
			require.NoError(t, err)
			if ok {
				break
			}
			time.Sleep(time.Millisecond)
		}
		ctx, cancel := context.WithTimeout(context.Background(), time.Second)
		defer cancel()
		m, err := store.nh.SyncGetShardMembership(ctx, 1)
		require.NoError(t, err)
		err = store.addReplica(1, 100, uuid.New().String(), m.ConfigChangeID)
		assert.NoError(t, err)
		hb := store.getHeartbeatMessage()
		assert.Equal(t, 2, len(hb.Replicas[0].Replicas))
	}
	runStoreTest(t, fn)
}

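// getTestStores starts two stores that together host log shard 1 (replica 1
// on store1, replica 2 on store2) and waits until replica 1 is the leader.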
func getTestStores() (*store, *store, error) {
	cfg1 := Config{
		FS:                  vfs.NewStrictMem(),
		DeploymentID:        1,
		RTTMillisecond:      5,
		DataDir:             "data-1",
		ServiceAddress:      "127.0.0.1:9001",
		RaftAddress:         "127.0.0.1:9002",
		GossipAddress:       "127.0.0.1:9011",
		GossipSeedAddresses: []string{"127.0.0.1:9011", "127.0.0.1:9012"},
	}
	cfg1.Fill()
	store1, err := newLogStore(cfg1, nil, runtime.DefaultRuntime())
	if err != nil {
		return nil, nil, err
	}
	cfg2 := Config{
		FS:                  vfs.NewStrictMem(),
		DeploymentID:        1,
		RTTMillisecond:      5,
		DataDir:             "data-2",
		ServiceAddress:      "127.0.0.1:9006",
		RaftAddress:         "127.0.0.1:9007",
		GossipAddress:       "127.0.0.1:9012",
		GossipSeedAddresses: []string{"127.0.0.1:9011", "127.0.0.1:9012"},
	}
	cfg2.Fill()
	store2, err := newLogStore(cfg2, nil, runtime.DefaultRuntime())
	if err != nil {
		return nil, nil, err
	}

	peers1 := make(map[uint64]dragonboat.Target)
	peers1[1] = store1.nh.ID()
	peers1[2] = store2.nh.ID()
	if err := store1.startReplica(1, 1, peers1, false); err != nil {
		return nil, nil, err
	}
	peers2 := make(map[uint64]dragonboat.Target)
	peers2[1] = store1.nh.ID()
	peers2[2] = store2.nh.ID()
	if err := store2.startReplica(1, 2, peers2, false); err != nil {
		return nil, nil, err
	}

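	// wait for shard 1 to elect a leader and, if another replica won the
	// election, transfer leadership to replica 1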
	for i := 0; i <= 30000; i++ {
		leaderID, _, ok, err := store1.nh.GetLeaderID(1)
		if err != nil {
			return nil, nil, err
		}
		if ok && leaderID == 1 {
			break
		}
		if ok && leaderID != 1 {
			if err := store1.requestLeaderTransfer(1, 1); err != nil {
				runtime.DefaultRuntime().Logger().Error("failed to transfer leader")
			}
		}
		time.Sleep(time.Millisecond)
		if i == 30000 {
			panic("failed to have leader elected in 30 seconds")
		}
	}
	return store1, store2, nil
}

func TestRemoveReplica(t *testing.T) {
	store1, store2, err := getTestStores()
	require.NoError(t, err)
	defer func() {
		require.NoError(t, store1.close())
		require.NoError(t, store2.close())
	}()
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	for {
		m, err := store1.nh.SyncGetShardMembership(ctx, 1)
		if err == dragonboat.ErrShardNotReady {
			time.Sleep(time.Millisecond)
			continue
		}
		require.NoError(t, err)
		require.NoError(t, store1.removeReplica(1, 2, m.ConfigChangeID))
		return
	}
}

func TestStopReplicaCanResetHAKeeperReplicaID(t *testing.T) {
	fn := func(t *testing.T, store *store) {
		peers := make(map[uint64]dragonboat.Target)
		peers[1] = store.id()
		assert.NoError(t, store.startHAKeeperReplica(1, peers, false))
		assert.Equal(t, uint64(1), atomic.LoadUint64(&store.haKeeperReplicaID))
		assert.NoError(t, store.stopReplica(hakeeper.DefaultHAKeeperShardID, 1))
		assert.Equal(t, uint64(0), atomic.LoadUint64(&store.haKeeperReplicaID))
	}
	runStoreTest(t, fn)
}

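// hasShard reports whether the store's heartbeat lists a replica of shardID.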
func hasShard(s *store, shardID uint64) bool {
	hb := s.getHeartbeatMessage()
	for _, info := range hb.Replicas {
		if info.ShardID == shardID {
			return true
		}
	}
	return false
}

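// hasReplica reports whether the store's heartbeat shows replicaID as a
// member of shardID.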
func hasReplica(s *store, shardID uint64, replicaID uint64) bool {
	hb := s.getHeartbeatMessage()
	for _, info := range hb.Replicas {
		if info.ShardID == shardID {
			for r := range info.Replicas {
				if r == replicaID {
					return true
				}
			}
		}
	}
	return false
}

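// mustHaveReplica waits up to a second (100 x 10ms) for the replica to show
// up in the store's heartbeat and fails the test if it never does.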
func mustHaveReplica(t *testing.T,
	s *store, shardID uint64, replicaID uint64) {
	for i := 0; i < 100; i++ {
		if hasReplica(s, shardID, replicaID) {
			return
		}
		time.Sleep(10 * time.Millisecond)
	}
	t.Fatalf("failed to locate the replica")
}