github.com/m3db/m3@v1.5.1-0.20231129193456-75a402aa583b/src/dbnode/integration/commitlog_bootstrap_unowned_shard_test.go

// +build integration

// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package integration

import (
	"fmt"
	"testing"
	"time"

	"github.com/m3db/m3/src/cluster/services"
	"github.com/m3db/m3/src/cluster/shard"
	"github.com/m3db/m3/src/dbnode/integration/fake"
	"github.com/m3db/m3/src/dbnode/namespace"
	"github.com/m3db/m3/src/dbnode/retention"
	"github.com/m3db/m3/src/dbnode/topology"
	xtest "github.com/m3db/m3/src/x/test"

	"github.com/stretchr/testify/require"
)

func TestCommitLogBootstrapUnownedShard(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	log := xtest.NewLogger(t)
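	// Use a retention window that comfortably covers the two blocks of data
	// generated further below.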
	retentionOpts := retention.NewOptions().
		SetRetentionPeriod(20 * time.Hour).
		SetBlockSize(2 * time.Hour).
		SetBufferPast(10 * time.Minute).
		SetBufferFuture(10 * time.Minute)
	blockSize := retentionOpts.BlockSize()

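	// Create a single test namespace using the retention options above.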
	ns1, err := namespace.NewMetadata(testNamespaces[0], namespace.NewOptions().
		SetRetentionOptions(retentionOpts))
	require.NoError(t, err)
	numShards := 6

	// Helper function to create node instances for the fake cluster service.
	node := func(index int, shards []uint32) services.ServiceInstance {
		id := fmt.Sprintf("testhost%d", index)
		endpoint := fmt.Sprintf("127.0.0.1:%d", multiAddrPortStart+(index*multiAddrPortEach))

		result := services.NewServiceInstance().
			SetInstanceID(id).
			SetEndpoint(endpoint)
		resultShards := make([]shard.Shard, len(shards))
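		// Mark every assigned shard as available so the node counts as its
		// current owner.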
		for i, id := range shards {
			resultShards[i] = shard.NewShard(id).SetState(shard.Available)
		}
		return result.SetShards(shard.NewShards(resultShards))
	}

	// Pretend there are two nodes sharing 6 shards (RF1).
	node0OwnedShards := []uint32{0, 1, 2}
	svc := fake.NewM3ClusterService().
		SetInstances([]services.ServiceInstance{
			node(0, node0OwnedShards),
			node(1, []uint32{3, 4, 5}),
		}).
		SetReplication(services.NewServiceReplication().SetReplicas(1)).
		SetSharding(services.NewServiceSharding().SetNumShards(numShards))
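	// Register the fake service under the "m3db" name and point a dynamic
	// topology initializer at it so both nodes see this placement.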
	svcs := fake.NewM3ClusterServices()
	svcs.RegisterService("m3db", svc)
	topoOpts := topology.NewDynamicOptions().
		SetConfigServiceClient(fake.NewM3ClusterClient(svcs, nil))
	topoInit := topology.NewDynamicInitializer(topoOpts)

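	// Create two setups that share the dynamic topology above, with peers
	// bootstrapping disabled so each node bootstraps from local sources only.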
	opts := NewTestOptions(t).
		SetNamespaces([]namespace.Metadata{ns1}).
		SetNumShards(numShards)
	setupOpts := []BootstrappableTestSetupOptions{
		{
			DisablePeersBootstrapper: true,
			TopologyInitializer:      topoInit,
		},
		{
			DisablePeersBootstrapper: true,
			TopologyInitializer:      topoInit,
		},
	}

	setups, closeFn := NewDefaultBootstrappableTestSetups(t, opts, setupOpts)
	defer closeFn()

	// Only set this up for the first setup because we're only writing commit
	// logs for the first server.
	setup := setups[0]
	commitLogOpts := setup.StorageOpts().CommitLogOptions().
		SetFlushInterval(defaultIntegrationTestFlushInterval)
	setup.SetStorageOpts(setup.StorageOpts().SetCommitLogOptions(commitLogOpts))

	log.Info("generating data")
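	// Generate data covering the two blocks preceding the current one.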
	now := setup.NowFn()()
	seriesMaps := generateSeriesMaps(30, nil, now.Add(-2*blockSize), now.Add(-blockSize))
	log.Info("writing data")
	// Write a commit log with generated data spread across all shards
	// (including shards that this node should not own). This node should still
	// be able to bootstrap successfully with commit log entries from shards
	// that it does not own.
	writeCommitLogData(t, setup, commitLogOpts, seriesMaps, ns1, false)
	log.Info("finished writing data")

	// Set up the bootstrapper after writing data so filesystem inspection can find it.
	setupCommitLogBootstrapperWithFSInspection(t, setup, commitLogOpts)

	// Start the servers.
	for _, setup := range setups {
		require.NoError(t, setup.StartServer())
	}

	// Defer stopping the servers.
	defer func() {
		setups.parallel(func(s TestSetup) {
			require.NoError(t, s.StopServer())
		})
		log.Debug("servers are now down")
	}()

	// Only fetch blocks for shards owned by node 0.
	metadatasByShard, err := m3dbClientFetchBlocksMetadata(
		setup.M3DBVerificationAdminClient(), testNamespaces[0], node0OwnedShards,
		now.Add(-2*blockSize), now, topology.ReadConsistencyLevelMajority)
	require.NoError(t, err)

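	// Read the observed series back from node 0 using the fetched block
	// metadata.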
	observedSeriesMaps := testSetupToSeriesMaps(t, setup, ns1, metadatasByShard)
	// Filter out the written series that node 0 does not own.
	filteredSeriesMaps := filterSeriesByShard(setup, seriesMaps, node0OwnedShards)
	// Expect to only see data that node 0 owns.
	verifySeriesMapsEqual(t, filteredSeriesMaps, observedSeriesMaps)
}