github.com/m3db/m3@v1.5.1-0.20231129193456-75a402aa583b/src/dbnode/integration/peers_bootstrap_merge_local_test.go

// +build integration

// Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package integration

import (
	"testing"
	"time"

	"github.com/m3db/m3/src/dbnode/integration/generate"
	"github.com/m3db/m3/src/dbnode/namespace"
	"github.com/m3db/m3/src/dbnode/retention"
	xmetrics "github.com/m3db/m3/src/dbnode/x/metrics"
	xtest "github.com/m3db/m3/src/x/test"
	xtime "github.com/m3db/m3/src/x/time"

	"github.com/stretchr/testify/require"
)

func TestPeersBootstrapMergeLocal(t *testing.T) {
	testPeersBootstrapMergeLocal(t, nil, nil)
}

func TestProtoPeersBootstrapMergeLocal(t *testing.T) {
	testPeersBootstrapMergeLocal(t, setProtoTestOptions, setProtoTestInputConfig)
}

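// testPeersBootstrapMergeLocal seeds the first node with data on disk,
// concurrently writes the rest of a partially complete block directly to the
// second node, and then verifies that the second node ends up with its
// peer-bootstrapped data merged with those direct writes.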
func testPeersBootstrapMergeLocal(t *testing.T, setTestOpts setTestOptions, updateInputConfig generate.UpdateBlockConfig) {
	if testing.Short() {
		t.SkipNow()
	}

	// Test setups
	log := xtest.NewLogger(t)
	retentionOpts := retention.NewOptions().
		SetRetentionPeriod(6 * time.Hour).
		SetBlockSize(2 * time.Hour).
		SetBufferPast(10 * time.Minute).
		SetBufferFuture(2 * time.Minute)
	namesp, err := namespace.NewMetadata(testNamespaces[0],
		namespace.NewOptions().SetRetentionOptions(retentionOpts))
	require.NoError(t, err)

	var (
		opts = NewTestOptions(t).
			SetNamespaces([]namespace.Metadata{namesp}).
			// Use TChannel clients for writing/reading because we want to target
			// individual nodes one at a time rather than writing to or reading
			// from every node in the cluster.
			SetUseTChannelClientForWriting(true).
			SetUseTChannelClientForReading(true)

		reporter = xmetrics.NewTestStatsReporter(xmetrics.NewTestStatsReporterOptions())

		// Enable UseTChannelClientForWriting because this test relies upon being
		// able to write data to a single node, which the M3DB client does not
		// support; an individual node's TChannel endpoints make that possible.
		setupOpts = []BootstrappableTestSetupOptions{
			{
				DisablePeersBootstrapper:    true,
				UseTChannelClientForWriting: true,
			},
			{
				DisableCommitLogBootstrapper: true,
				DisablePeersBootstrapper:     false,
				UseTChannelClientForWriting:  true,
				TestStatsReporter:            reporter,
			},
		}
	)

	if setTestOpts != nil {
		opts = setTestOpts(t, opts)
		namesp = opts.Namespaces()[0]
	}

	setups, closeFn := NewDefaultBootstrappableTestSetups(t, opts, setupOpts)
	defer closeFn()

	// Write test data for the first node; ensure it overflows past the buffer
	// future cutover so some points can only arrive at the second node directly
	now := setups[0].NowFn()()
	cutoverAt := now.Add(retentionOpts.BufferFuture())
	completeAt := now.Add(180 * time.Second)
	blockSize := retentionOpts.BlockSize()
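	// Two blocks are generated: the previous block is complete and belongs
	// entirely to the first node, while the current block spans from now until
	// completeAt at one datapoint per second (NumPoints is computed in seconds)
	// and will be spliced between the first node and direct writes.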
	inputData := []generate.BlockConfig{
		{IDs: []string{"foo", "bar"}, NumPoints: 180, Start: now.Add(-blockSize)},
		{IDs: []string{"foo", "baz"}, NumPoints: int(completeAt.Sub(now) / time.Second), Start: now},
	}
	if updateInputConfig != nil {
		updateInputConfig(inputData)
	}
	seriesMaps := generate.BlocksByStart(inputData)
	firstNodeSeriesMaps := map[xtime.UnixNano]generate.SeriesBlock{}
	directWritesSeriesMaps := map[xtime.UnixNano]generate.SeriesBlock{}
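	// Split the generated series between the two nodes: complete blocks are
	// copied to the first node as-is; the partial (current) block is spliced
	// point by point, alternating pre-cutover points between the first node
	// and the direct writes, with every post-cutover point becoming a direct
	// write to the second node.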
	for start, s := range seriesMaps {
		for i := range s {
			isPartialSeries := start.Equal(now)
			if !isPartialSeries {
				// Series in complete blocks belong entirely to the first node
				firstNodeSeriesMaps[start] = append(firstNodeSeriesMaps[start], s[i])
				continue
			}

			firstNodeSeries := generate.Series{ID: s[i].ID}
			directWritesSeries := generate.Series{ID: s[i].ID}
			for j := range s[i].Data {
				if s[i].Data[j].TimestampNanos.Before(cutoverAt) {
					// Partial series points before the cutover are spliced between
					// the first node and the direct writes
					if j%2 == 0 {
						firstNodeSeries.Data = append(firstNodeSeries.Data, s[i].Data[j])
						continue
					}
					directWritesSeries.Data = append(directWritesSeries.Data, s[i].Data[j])
					continue
				}
				// Points after the cutover become writes made directly to the second node
				directWritesSeries.Data = append(directWritesSeries.Data, s[i].Data[j])
			}

			firstNodeSeriesMaps[start] = append(firstNodeSeriesMaps[start], firstNodeSeries)
			directWritesSeriesMaps[start] = append(directWritesSeriesMaps[start], directWritesSeries)
		}
	}

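	// At one datapoint per second, 120 of the current block's 180 points fall
	// before the cutover (bufferFuture = 2m) and are split evenly, 60 per node,
	// while the remaining 60 all become direct writes, giving the 60 vs 120
	// counts asserted below.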
	// Assert test data for first node is correct
	require.Equal(t, 2, len(firstNodeSeriesMaps))

	require.Equal(t, 2, firstNodeSeriesMaps[now.Add(-blockSize)].Len())
	require.Equal(t, "foo", firstNodeSeriesMaps[now.Add(-blockSize)][0].ID.String())
	require.Equal(t, 180, len(firstNodeSeriesMaps[now.Add(-blockSize)][0].Data))
	require.Equal(t, "bar", firstNodeSeriesMaps[now.Add(-blockSize)][1].ID.String())
	require.Equal(t, 180, len(firstNodeSeriesMaps[now.Add(-blockSize)][1].Data))

	require.Equal(t, 2, firstNodeSeriesMaps[now].Len())
	require.Equal(t, "foo", firstNodeSeriesMaps[now][0].ID.String())
	require.Equal(t, 60, len(firstNodeSeriesMaps[now][0].Data))
	require.Equal(t, "baz", firstNodeSeriesMaps[now][1].ID.String())
	require.Equal(t, 60, len(firstNodeSeriesMaps[now][1].Data))

	// Assert test data for direct writes is correct
	require.Equal(t, 1, len(directWritesSeriesMaps))

	require.Equal(t, 2, directWritesSeriesMaps[now].Len())
	require.Equal(t, "foo", directWritesSeriesMaps[now][0].ID.String())
	require.Equal(t, 120, len(directWritesSeriesMaps[now][0].Data))
	require.Equal(t, "baz", directWritesSeriesMaps[now][1].ID.String())
	require.Equal(t, 120, len(directWritesSeriesMaps[now][1].Data))

	// Write the first node's data to disk so its filesystem bootstrapper can load it
	err = writeTestDataToDisk(namesp, setups[0], firstNodeSeriesMaps, 0)
	require.NoError(t, err)

	// Start the first server with filesystem bootstrapper
	require.NoError(t, setups[0].StartServer())

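	// Coordinate the direct writes with the second node's startup: the
	// goroutine below waits until the second node has begun bootstrapping
	// (observed via the test stats reporter), then waits until it is fully up
	// before advancing its clock and issuing the direct writes.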
	secondNodeIsUp := make(chan struct{})
	doneWriting := make(chan struct{})
	go func() {
		// Wait for bootstrapping to occur
		for reporter.Counters()["database.bootstrap.start"] == 0 {
			time.Sleep(10 * time.Millisecond)
		}

		<-secondNodeIsUp

		// Advance time so the direct writes, some of which are timestamped
		// beyond the original buffer future window, are accepted
		setups[1].SetNowFn(completeAt)

		// Write data that "arrives" at the second node directly
		err := setups[1].WriteBatch(namesp.ID(),
			directWritesSeriesMaps[now])
		if err != nil {
			panic(err)
		}

		doneWriting <- struct{}{}
	}()

	// Start the second server with the peers and filesystem bootstrappers
	require.NoError(t, setups[1].StartServer())
	log.Debug("servers are now up")

	secondNodeIsUp <- struct{}{}
	<-doneWriting

	// Stop the servers
	defer func() {
		setups.parallel(func(s TestSetup) {
			require.NoError(t, s.StopServer())
		})
		log.Debug("servers are now down")
	}()

	// Verify the in-memory data matches what we expect:
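	// the first node should hold only the subset written to its disk, while
	// the second node should hold the full series maps, i.e. its
	// peer-bootstrapped data merged with the direct writes.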
	verifySeriesMaps(t, setups[0], namesp.ID(), firstNodeSeriesMaps)
	verifySeriesMaps(t, setups[1], namesp.ID(), seriesMaps)
}