github.com/m3db/m3@v1.5.0/src/dbnode/integration/peers_bootstrap_merge_peer_blocks_test.go

// +build integration

// Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package integration

import (
	"testing"
	"time"

	"github.com/m3db/m3/src/dbnode/integration/generate"
	"github.com/m3db/m3/src/dbnode/namespace"
	"github.com/m3db/m3/src/dbnode/retention"
	xtest "github.com/m3db/m3/src/x/test"
	xtime "github.com/m3db/m3/src/x/time"

	"github.com/stretchr/testify/require"
)

func TestPeersBootstrapMergePeerBlocks(t *testing.T) {
	testPeersBootstrapMergePeerBlocks(t, nil, nil)
}

func TestProtoPeersBootstrapMergePeerBlocks(t *testing.T) {
	testPeersBootstrapMergePeerBlocks(t, setProtoTestOptions, setProtoTestInputConfig)
}

func testPeersBootstrapMergePeerBlocks(t *testing.T, setTestOpts setTestOptions, updateInputConfig generate.UpdateBlockConfig) {
	if testing.Short() {
		t.SkipNow()
	}

	// Test setups
	log := xtest.NewLogger(t)

	retentionOpts := retention.NewOptions().
		SetRetentionPeriod(20 * time.Hour).
		SetBlockSize(2 * time.Hour).
		SetBufferPast(10 * time.Minute).
		SetBufferFuture(2 * time.Minute)
	namesp, err := namespace.NewMetadata(testNamespaces[0], namespace.NewOptions().
		SetRetentionOptions(retentionOpts))
	require.NoError(t, err)
	opts := NewTestOptions(t).
		SetNamespaces([]namespace.Metadata{namesp}).
		// Use TChannel clients for writing / reading because we want to target individual nodes at a time
		// and not write/read all nodes in the cluster.
		SetUseTChannelClientForWriting(true).
		SetUseTChannelClientForReading(true)
	if setTestOpts != nil {
		opts = setTestOpts(t, opts)
		namesp = opts.Namespaces()[0]
	}
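	// Bootstrapper configuration for the three nodes: the first two nodes rely
	// on their local filesystem data (peers bootstrapper disabled), while the
	// third node disables the commit log bootstrapper so it must fetch and
	// merge blocks from its two peers.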
	setupOpts := []BootstrappableTestSetupOptions{
		{DisablePeersBootstrapper: true},
		{DisablePeersBootstrapper: true},
		{
			DisableCommitLogBootstrapper: true,
			DisablePeersBootstrapper:     false,
		},
	}
	setups, closeFn := NewDefaultBootstrappableTestSetups(t, opts, setupOpts)
	defer closeFn()

	// Write test data alternating missing data for left/right nodes
	now := setups[0].NowFn()()
	blockSize := retentionOpts.BlockSize()
	// Make sure we have multiple blocks of data for multiple series to exercise
	// the grouping and aggregating logic in the client peer bootstrapping process
	inputData := []generate.BlockConfig{
		{IDs: []string{"foo", "baz"}, NumPoints: 90, Start: now.Add(-4 * blockSize)},
		{IDs: []string{"foo", "baz"}, NumPoints: 90, Start: now.Add(-3 * blockSize)},
		{IDs: []string{"foo", "baz"}, NumPoints: 90, Start: now.Add(-2 * blockSize)},
		{IDs: []string{"foo", "baz"}, NumPoints: 90, Start: now.Add(-blockSize)},
		{IDs: []string{"foo", "baz"}, NumPoints: 90, Start: now},
	}
	if updateInputConfig != nil {
		updateInputConfig(inputData)
	}
	seriesMaps := generate.BlocksByStart(inputData)
	left := make(map[xtime.UnixNano]generate.SeriesBlock)
	right := make(map[xtime.UnixNano]generate.SeriesBlock)
	remainder := 0
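	// appendSeries keeps only every other datapoint of a series and flips the
	// remainder after each call, so successive calls (left, then right) receive
	// complementary halves of the same series; merging the two halves
	// reproduces the full series.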
	appendSeries := func(target map[xtime.UnixNano]generate.SeriesBlock, start time.Time, s generate.Series) {
		var dataWithMissing []generate.TestValue
		for i := range s.Data {
			if i%2 != remainder {
				continue
			}
			dataWithMissing = append(dataWithMissing, s.Data[i])
		}
		target[xtime.ToUnixNano(start)] = append(
			target[xtime.ToUnixNano(start)],
			generate.Series{ID: s.ID, Data: dataWithMissing},
		)
		remainder = 1 - remainder
	}
	for start, data := range seriesMaps {
		for _, series := range data {
			appendSeries(left, start.ToTime(), series)
			appendSeries(right, start.ToTime(), series)
		}
	}
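	// Persist each node's half of the data so the filesystem bootstrappers on
	// the first two nodes can load it at startup.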
	require.NoError(t, writeTestDataToDisk(namesp, setups[0], left, 0))
	require.NoError(t, writeTestDataToDisk(namesp, setups[1], right, 0))

	// Start the first two servers with filesystem bootstrappers
	setups[:2].parallel(func(s TestSetup) {
		require.NoError(t, s.StartServer())
	})

	// Start the last server with peers and filesystem bootstrappers
	require.NoError(t, setups[2].StartServer())
	log.Debug("servers are now up")

	// Stop the servers
	defer func() {
		setups.parallel(func(s TestSetup) {
			require.NoError(t, s.StopServer())
		})
		log.Debug("servers are now down")
	}()

	// Verify the in-memory data matches what we expect: the first two nodes
	// hold only their partial halves, while the peer-bootstrapped node holds
	// the fully merged series.
	verifySeriesMaps(t, setups[0], namesp.ID(), left)
	verifySeriesMaps(t, setups[1], namesp.ID(), right)
	verifySeriesMaps(t, setups[2], namesp.ID(), seriesMaps)
}