github.com/m3db/m3@v1.5.1-0.20231129193456-75a402aa583b/src/dbnode/integration/peers_bootstrap_node_down_test.go

// +build integration

// Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package integration

import (
	"testing"
	"time"

	"github.com/m3db/m3/src/dbnode/integration/generate"
	"github.com/m3db/m3/src/dbnode/namespace"
	"github.com/m3db/m3/src/dbnode/retention"
	xtest "github.com/m3db/m3/src/x/test"

	"github.com/stretchr/testify/require"
)

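// TestPeersBootstrapNodeDown verifies that a node can still bootstrap from its
// peers while one replica in the cluster is down: data is written to the first
// node, the second node is left stopped, and the third node must peer-bootstrap
// the data from the first node alone.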
func TestPeersBootstrapNodeDown(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	// Test setups
	log := xtest.NewLogger(t)
	retentionOpts := retention.NewOptions().
		SetRetentionPeriod(20 * time.Hour).
		SetBlockSize(2 * time.Hour).
		SetBufferPast(10 * time.Minute).
		SetBufferFuture(2 * time.Minute)

	namesp, err := namespace.NewMetadata(testNamespaces[0],
		namespace.NewOptions().SetRetentionOptions(retentionOpts))
	require.NoError(t, err)
	opts := NewTestOptions(t).
		SetNamespaces([]namespace.Metadata{namesp}).
		// Use TChannel clients for writing / reading because we want to target individual nodes at a time
		// and not write/read all nodes in the cluster.
		SetUseTChannelClientForWriting(true).
		SetUseTChannelClientForReading(true)

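	// Configure bootstrappers per node: the first two nodes disable the peers
	// bootstrapper and bootstrap from their own local state, while the third
	// node disables the commit log bootstrapper and keeps the peers
	// bootstrapper enabled, so it must stream its data from peers. The second
	// node is never started, simulating a peer that is down during bootstrap.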
	setupOpts := []BootstrappableTestSetupOptions{
		{DisablePeersBootstrapper: true},
		{DisablePeersBootstrapper: true},
		{
			DisableCommitLogBootstrapper: true,
			DisablePeersBootstrapper:     false,
		},
	}
	setups, closeFn := NewDefaultBootstrappableTestSetups(t, opts, setupOpts)
	defer closeFn()

	// Write test data for first node
	now := setups[0].NowFn()()
	blockSize := retentionOpts.BlockSize()
	// Make sure we have multiple blocks of data for multiple series to exercise
	// the grouping and aggregating logic in the client peer bootstrapping process
	seriesMaps := generate.BlocksByStart([]generate.BlockConfig{
		{IDs: []string{"foo", "baz"}, NumPoints: 90, Start: now.Add(-4 * blockSize)},
		{IDs: []string{"foo", "baz"}, NumPoints: 90, Start: now.Add(-3 * blockSize)},
		{IDs: []string{"foo", "baz"}, NumPoints: 90, Start: now.Add(-2 * blockSize)},
		{IDs: []string{"foo", "baz"}, NumPoints: 90, Start: now.Add(-blockSize)},
		{IDs: []string{"foo", "baz"}, NumPoints: 90, Start: now},
	})
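	// Flush the generated data to disk on the first node so its filesystem
	// bootstrapper can load it when the server starts.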
	err = writeTestDataToDisk(namesp, setups[0], seriesMaps, 0)
	require.NoError(t, err)

	// Start the first server with filesystem bootstrapper
	require.NoError(t, setups[0].StartServer())

	// Leave second node down, start the last server with peers and filesystem bootstrappers
	require.NoError(t, setups[2].StartServer())
	log.Debug("first and third servers are now up")

	// Stop the servers
	defer func() {
		testSetups{setups[0], setups[2]}.parallel(func(s TestSetup) {
			require.NoError(t, s.StopServer())
		})
		log.Debug("servers are now down")
	}()

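	// The second node (setups[1]) was never started, so it is excluded from
	// verification; the data on setups[2] can only have arrived via the peers
	// bootstrapper streaming from setups[0].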
	// Verify in-memory data match what we expect
	expect := testSetups{setups[0], setups[2]}
	for _, setup := range expect {
		verifySeriesMaps(t, setup, namesp.ID(), seriesMaps)
	}
}