github.com/m3db/m3@v1.5.1-0.20231129193456-75a402aa583b/src/dbnode/integration/peers_bootstrap_single_node_test.go

// +build integration

// Copyright (c) 2018 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package integration

import (
	"testing"
	"time"

	"github.com/m3db/m3/src/cluster/services"
	"github.com/m3db/m3/src/cluster/shard"
	"github.com/m3db/m3/src/dbnode/integration/generate"
	"github.com/m3db/m3/src/dbnode/namespace"
	"github.com/m3db/m3/src/dbnode/retention"
	"github.com/m3db/m3/src/dbnode/sharding"
	"github.com/m3db/m3/src/dbnode/storage/bootstrap/bootstrapper/uninitialized"
	"github.com/m3db/m3/src/dbnode/topology"
	"github.com/m3db/m3/src/dbnode/topology/testutil"
	xtest "github.com/m3db/m3/src/x/test"

	"github.com/stretchr/testify/require"
)

// TestPeersBootstrapSingleNodeUninitialized makes sure that we can include the peer bootstrapper
// in a single-node topology of an uninitialized cluster without causing a bootstrap failure or infinite hang.
func TestPeersBootstrapSingleNodeUninitialized(t *testing.T) {
	opts := NewTestOptions(t)

	// Define a topology with initializing shards
	minShard := uint32(0)
	maxShard := uint32(opts.NumShards()) - uint32(1)
	instances := []services.ServiceInstance{
		node(t, 0, newClusterShardsRange(minShard, maxShard, shard.Initializing)),
	}

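	// Convert each service instance into a host shard set for the static topology.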
	hostShardSets := []topology.HostShardSet{}
	for _, instance := range instances {
		h, err := topology.NewHostShardSetFromServiceInstance(instance, sharding.DefaultHashFn(opts.NumShards()))
		require.NoError(t, err)
		hostShardSets = append(hostShardSets, h)
	}

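	// Build a shard set spanning the full shard range with all shards in the initializing state.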
	shards := testutil.ShardsRange(minShard, maxShard, shard.Initializing)
	shardSet, err := sharding.NewShardSet(
		shards,
		sharding.DefaultHashFn(int(maxShard)),
	)
	require.NoError(t, err)

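	// Construct a static topology initializer with a single replica backed by the shard set and host shard sets above.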
	topoOpts := topology.NewStaticOptions().
		SetReplicas(len(instances)).
		SetHostShardSets(hostShardSets).
		SetShardSet(shardSet)
	topoInit := topology.NewStaticInitializer(topoOpts)

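	// Keep the peers bootstrapper enabled and run the setup against the custom uninitialized topology.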
	setupOpts := []BootstrappableTestSetupOptions{
		{
			DisablePeersBootstrapper: false,
			TopologyInitializer:      topoInit,
			// This will bootstrap with unfulfilled ranges.
			FinalBootstrapper: uninitialized.UninitializedTopologyBootstrapperName,
		},
	}
	testPeersBootstrapSingleNode(t, setupOpts)
}

// TestPeersBootstrapSingleNodeInitialized makes sure that we can include the peer bootstrapper
// in a single-node topology of an already-initialized cluster without causing a bootstrap failure or infinite hang.
func TestPeersBootstrapSingleNodeInitialized(t *testing.T) {
	setupOpts := []BootstrappableTestSetupOptions{
		{DisablePeersBootstrapper: false},
	}
	testPeersBootstrapSingleNode(t, setupOpts)
}

func testPeersBootstrapSingleNode(t *testing.T, setupOpts []BootstrappableTestSetupOptions) {
	if testing.Short() {
		t.SkipNow()
	}

	// Test setups
	log := xtest.NewLogger(t)
	retentionOpts := retention.NewOptions().
		SetRetentionPeriod(20 * time.Hour).
		SetBlockSize(2 * time.Hour).
		SetBufferPast(10 * time.Minute).
		SetBufferFuture(2 * time.Minute)
	namesp, err := namespace.NewMetadata(testNamespaces[0], namespace.NewOptions().SetRetentionOptions(retentionOpts))
	require.NoError(t, err)
	opts := NewTestOptions(t).
		SetNamespaces([]namespace.Metadata{namesp}).
		// Use TChannel clients for writing / reading because we want to target individual nodes at a time
		// and not write/read all nodes in the cluster.
		SetUseTChannelClientForWriting(true).
		SetUseTChannelClientForReading(true)

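	// Create the bootstrappable test setups (a single node for this test) and make sure they are torn down when the test finishes.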
	setups, closeFn := NewDefaultBootstrappableTestSetups(t, opts, setupOpts)
	defer closeFn()

	// Write test data
	now := setups[0].NowFn()()
	blockSize := retentionOpts.BlockSize()
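	// Generate series data for the five most recent block starts and write it to disk as fileset files.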
	seriesMaps := generate.BlocksByStart([]generate.BlockConfig{
		{IDs: []string{"foo", "baz"}, NumPoints: 90, Start: now.Add(-4 * blockSize)},
		{IDs: []string{"foo", "baz"}, NumPoints: 90, Start: now.Add(-3 * blockSize)},
		{IDs: []string{"foo", "baz"}, NumPoints: 90, Start: now.Add(-2 * blockSize)},
		{IDs: []string{"foo", "baz"}, NumPoints: 90, Start: now.Add(-blockSize)},
		{IDs: []string{"foo", "baz"}, NumPoints: 90, Start: now},
	})
	require.NoError(t, writeTestDataToDisk(namesp, setups[0], seriesMaps, 0))

	// Set the time one blockSize into the future (for which we do not have
	// a fileset file) to ensure we try to use the peer bootstrapper.
	setups[0].SetNowFn(now.Add(blockSize))

	// Start the server with peers and filesystem bootstrappers
	require.NoError(t, setups[0].StartServer())
	log.Debug("servers are now up")

	// Stop the servers
	defer func() {
		setups.parallel(func(s TestSetup) {
			require.NoError(t, s.StopServer())
		})
		log.Debug("servers are now down")
	}()

	// Verify in-memory data matches what we expect
	for _, setup := range setups {
		verifySeriesMaps(t, setup, namesp.ID(), seriesMaps)
	}
}